diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 999a1c0649..e729d90432 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -86,3 +86,15 @@ repos:
     rev: v2.11.0
     hooks:
       - id: pyproject-fmt
+
+  - repo: https://github.com/antonbabenko/pre-commit-terraform
+    rev: v1.92.0
+    hooks:
+      - id: terraform_fmt
+        files: \.tf$
+      - id: terraform_validate
+        files: \.tf$
+      - id: terraform_tflint
+        files: \.tf$
+      - id: terraform_trivy
+        files: \.tf$
diff --git a/Terraform/Operational-Guide.md b/Terraform/Operational-Guide.md
new file mode 100644
index 0000000000..68f290d81a
--- /dev/null
+++ b/Terraform/Operational-Guide.md
@@ -0,0 +1,159 @@
+# OWASP Nest - AWS Infrastructure Operational Guide
+
+This document contains the complete operational guide for deploying and managing the OWASP Nest application infrastructure on AWS using Terraform. The project is designed to be modular, reusable, and secure, following industry best practices for managing infrastructure in a collaborative, open-source environment.
+
+## Project Overview
+
+This Terraform setup provisions a multi-environment (dev, staging, prod) infrastructure for the OWASP Nest application. It leverages a modular design to manage networking, compute, data, and storage resources independently.
+
+- **Environments:** Code is organized under `environments/` to provide strong isolation between `dev`, `staging`, and `prod`.
+- **Modules:** Reusable components are defined in `modules/` for consistency and maintainability.
+- **State Management:** Terraform state is stored remotely in an S3 bucket. State locking is managed by DynamoDB to prevent conflicts and ensure safe, concurrent operations by multiple contributors.
+- **Security:** All sensitive data is managed via AWS Secrets Manager. Network access is restricted using a least-privilege security group model.
+
+## Phased Rollout Plan
+
+The infrastructure will be built and deployed in a series of focused Pull Requests to ensure each foundational layer is stable and well-reviewed before building on top of it.
+
+1. **Phase 1: Foundational Networking (`modules/network`)**
+2. **Phase 2: Data & Storage Tiers (`modules/database`, `modules/storage`, `modules/cache`)**
+3. **Phase 3: Compute & IAM (`modules/compute`, `modules/iam`)**
+
+This document will be updated as each phase is completed.
+
+## Prerequisites
+
+Before you begin, ensure you have the following tools installed and configured:
+
+1. **Terraform:** [Install Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) (Version 1.3.0 or newer recommended).
+2. **AWS CLI:** [Install and configure the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html). You must have an active AWS profile configured with credentials that have sufficient permissions to create the resources.
+3. **pre-commit:** [Install pre-commit](https://pre-commit.com/#installation) (`pip install pre-commit`).
+
+## Initial Setup (One-Time)
+
+This infrastructure requires an S3 bucket and a DynamoDB table for managing Terraform's remote state. These must be created manually before you can run `terraform init`.
+
+**Note:** The following commands are examples. Please ensure the chosen S3 bucket name is globally unique.
+
+1. **Define Environment Variables (Recommended):**
+   ```bash
+   # Run these in your terminal to make the next steps easier
+   export AWS_REGION="us-east-1" # Or your preferred AWS region
+   export TF_STATE_BUCKET="owasp-nest-tfstate-$(aws sts get-caller-identity --query Account --output text)" # Creates a unique bucket name
+   export TF_STATE_LOCK_TABLE="owasp-nest-tf-locks"
+   ```
+
+2. **Create the S3 Bucket for Terraform State:**
+   *This bucket will store the `.tfstate` file, which is Terraform's map of your infrastructure. Note that `us-east-1` is the one region that must not be passed as a `LocationConstraint`, hence the conditional below.*
+   ```bash
+   if [ "${AWS_REGION}" = "us-east-1" ]; then
+     aws s3api create-bucket --bucket "${TF_STATE_BUCKET}" --region "${AWS_REGION}"
+   else
+     aws s3api create-bucket \
+       --bucket "${TF_STATE_BUCKET}" \
+       --region "${AWS_REGION}" \
+       --create-bucket-configuration LocationConstraint="${AWS_REGION}"
+   fi
+
+   aws s3api put-bucket-versioning \
+     --bucket "${TF_STATE_BUCKET}" \
+     --versioning-configuration Status=Enabled
+
+   aws s3api put-public-access-block \
+     --bucket "${TF_STATE_BUCKET}" \
+     --public-access-block-configuration 'BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true'
+
+   aws s3api put-bucket-encryption \
+     --bucket "${TF_STATE_BUCKET}" \
+     --server-side-encryption-configuration '{"Rules":[{"ApplyServerSideEncryptionByDefault":{"SSEAlgorithm":"AES256"}}]}'
+   ```
+
+3. **Create the DynamoDB Table for State Locking:**
+   *This table prevents multiple people from running `terraform apply` at the same time, which could corrupt the state file.*
+   ```bash
+   aws dynamodb create-table \
+     --table-name "${TF_STATE_LOCK_TABLE}" \
+     --attribute-definitions AttributeName=LockID,AttributeType=S \
+     --key-schema AttributeName=LockID,KeyType=HASH \
+     --billing-mode PAY_PER_REQUEST \
+     --region "${AWS_REGION}"
+   ```
+   *(Note: `PAY_PER_REQUEST` billing mode is more cost-effective than provisioned capacity for the infrequent access pattern of a lock table.)*
+
+4. **Install Pre-commit Hooks:**
+   *From the root of the repository, run this once to set up the automated code quality checks.*
+   ```bash
+   pre-commit install
+   ```
+
+## Secret Population
+
+This project provisions placeholders for secrets in AWS Secrets Manager but does **not** populate them with values. You must do this manually for each environment.
+
+1. Navigate to the **AWS Secrets Manager** console in the correct region.
+2. Find the secrets created by Terraform (e.g., `owasp-nest/dev/AppSecrets`, `owasp-nest/dev/DbCredentials`).
+3. Click on a secret and choose **"Retrieve secret value"**.
+4. Click **"Edit"** and populate the secret values.
+
+   - **For `DbCredentials`:** Use the "Plaintext" tab and create a JSON structure. Terraform will automatically generate a strong password, but you can override it here if needed.
+     ```json
+     {
+       "username": "nestadmin",
+       "password": "a-very-strong-and-long-password"
+     }
+     ```
+
+   - **For `AppSecrets`:** Use the "Plaintext" tab and create a key/value JSON structure for all required application secrets.
+     ```json
+     {
+       "DJANGO_SECRET_KEY": "generate-a-strong-random-key-here",
+       "DJANGO_ALGOLIA_WRITE_API_KEY": "your-algolia-key",
+       "NEXT_PUBLIC_SENTRY_DSN": "your-sentry-dsn",
+       "GITHUB_TOKEN": "your-github-token"
+     }
+     ```
+5. Save the secret. Repeat for all required secrets and all environments. (A CLI alternative is sketched below.)
+
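+As an alternative to the console, secret values can also be written from the command line with the standard `aws secretsmanager put-secret-value` operation. A minimal sketch, reusing the placeholder values from step 4 (substitute real credentials before running):
+
+```bash
+# Overwrite the current value of the dev database credentials secret.
+# The payload here is a placeholder; substitute your real values.
+aws secretsmanager put-secret-value \
+  --secret-id "owasp-nest/dev/DbCredentials" \
+  --secret-string '{"username":"nestadmin","password":"a-very-strong-and-long-password"}' \
+  --region "${AWS_REGION}"
+```
+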
+## Deployment Workflow
+
+To deploy an environment, navigate to its directory and run the standard Terraform workflow.
+
+1. **Navigate to the Environment Directory:**
+   ```bash
+   cd Terraform/environments/Dev
+   ```
+
+2. **Create a `terraform.tfvars` file:**
+   *Copy the example file. This file is where you will customize variables for the environment (a filled-in sketch appears after step 3).*
+   ```bash
+   cp terraform.tfvars.example terraform.tfvars
+   # Now edit terraform.tfvars with your specific values (e.g., your AWS account ID, desired region).
+   ```
+
+3. **Initialize Terraform:**
+   *This downloads the necessary providers and configures the S3 backend (a sketch of the backend block follows this step).*
+   ```bash
+   terraform init
+   ```
+
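+`terraform init` reads the backend configuration committed in the environment directory. The exact file name and state key layout are project decisions not shown in this guide; as a sketch, a `backend.tf` wired to the state bucket and lock table from the Initial Setup section might look like:
+
+```hcl
+terraform {
+  backend "s3" {
+    bucket         = "owasp-nest-tfstate-<account-id>" # state bucket created during Initial Setup
+    key            = "dev/terraform.tfstate"           # assumed per-environment state key
+    region         = "us-east-1"                       # must match the bucket's region
+    dynamodb_table = "owasp-nest-tf-locks"             # lock table created during Initial Setup
+    encrypt        = true
+  }
+}
+```
+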
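+Similarly, the `terraform.tfvars` you created in step 2 supplies the environment's inputs before you plan. Assuming the environment root exposes variables mirroring the network module's inputs (see `modules/01-Network/variables.tf`), a filled-in sketch might look like:
+
+```hcl
+project_prefix       = "owasp-nest"
+environment          = "dev"
+availability_zones   = ["us-east-1a", "us-east-1b"]
+public_subnet_cidrs  = ["10.0.1.0/24", "10.0.2.0/24"]
+private_subnet_cidrs = ["10.0.101.0/24", "10.0.102.0/24"]
+acm_certificate_arn  = "arn:aws:acm:us-east-1:123456789012:certificate/REPLACE-ME"
+```
+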
+4. **Plan the Deployment:**
+   *This creates an execution plan and shows you what changes will be made. Always review this carefully.*
+   ```bash
+   terraform plan
+   ```
+
+5. **Apply the Changes:**
+   *This provisions the infrastructure on AWS. You will be prompted to confirm.*
+   ```bash
+   terraform apply
+   ```
+
+## Module Overview
+
+- **`modules/network`**: Creates the foundational networking layer, including the VPC, subnets, NAT Gateway, and Application Load Balancer.
+- **`modules/database`**: Provisions the AWS RDS for PostgreSQL instance, including its subnet group and security group.
+- **`modules/cache`**: Provisions the AWS ElastiCache for Redis cluster.
+- **`modules/storage`**: Creates the S3 buckets for public static assets and private media uploads, configured with secure defaults.
+- **`modules/compute`**: Provisions all compute resources: the ECS Fargate service for the frontend, the EC2 instance for cron jobs, and the necessary IAM roles and security groups for all services. It also configures the ALB routing rules.
+- **`modules/iam`**: (Future) A dedicated module for creating the various IAM roles.
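+
+As a sketch of how these modules are wired together, an environment root might instantiate the network module roughly like this (the `source` path and variable plumbing are assumptions based on this repository's layout, not a verbatim excerpt):
+
+```hcl
+module "network" {
+  source = "../../modules/01-Network" # path assumed from the repo layout
+
+  project_prefix       = var.project_prefix
+  environment          = var.environment
+  availability_zones   = var.availability_zones
+  public_subnet_cidrs  = var.public_subnet_cidrs
+  private_subnet_cidrs = var.private_subnet_cidrs
+  acm_certificate_arn  = var.acm_certificate_arn
+  tags                 = var.tags
+}
+```
\ No newline at end of file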
diff --git a/Terraform/environments/.gitkeep b/Terraform/environments/.gitkeep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/Terraform/modules/.gitkeep b/Terraform/modules/.gitkeep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/Terraform/modules/01-Network/main.tf b/Terraform/modules/01-Network/main.tf
new file mode 100644
index 0000000000..572b6cd460
--- /dev/null
+++ b/Terraform/modules/01-Network/main.tf
@@ -0,0 +1,536 @@
+# VPC and Core Networking
+
+resource "aws_vpc" "main" {
+  cidr_block           = var.vpc_cidr
+  enable_dns_support   = true
+  enable_dns_hostnames = true
+
+  tags = merge(
+    var.tags,
+    {
+      Name = "${var.project_prefix}-${var.environment}-vpc"
+    }
+  )
+}
+
+resource "aws_internet_gateway" "main" {
+  vpc_id = aws_vpc.main.id
+
+  tags = merge(
+    var.tags,
+    {
+      Name = "${var.project_prefix}-${var.environment}-igw"
+    }
+  )
+}
+
+# Subnets
+
+# Deploys a public and private subnet into each specified Availability Zone.
+
+resource "aws_subnet" "public" {
+  count                   = length(var.public_subnet_cidrs)
+  vpc_id                  = aws_vpc.main.id
+  cidr_block              = var.public_subnet_cidrs[count.index]
+  availability_zone       = var.availability_zones[count.index]
+  map_public_ip_on_launch = true
+
+  tags = merge(
+    var.tags,
+    {
+      Name = "${var.project_prefix}-${var.environment}-public-subnet-${var.availability_zones[count.index]}"
+    }
+  )
+}
+
+resource "aws_subnet" "private" {
+  count             = length(var.private_subnet_cidrs)
+  vpc_id            = aws_vpc.main.id
+  cidr_block        = var.private_subnet_cidrs[count.index]
+  availability_zone = var.availability_zones[count.index]
+
+  tags = merge(
+    var.tags,
+    {
+      Name = "${var.project_prefix}-${var.environment}-private-subnet-${var.availability_zones[count.index]}"
+    }
+  )
+}
+
+# Routing and NAT Gateway for Private Subnets
+
+# We create a SINGLE NAT Gateway and a SINGLE private route table. This is a cost
+# optimization but introduces a single-AZ egress SPOF compared to per-AZ NAT gateways.
+# Scale to one NAT per AZ if higher availability is required.
+
+resource "aws_eip" "nat" {
+  tags = merge(
+    var.tags,
+    {
+      Name = "${var.project_prefix}-${var.environment}-nat-eip"
+    }
+  )
+}
+
+resource "aws_nat_gateway" "main" {
+  allocation_id = aws_eip.nat.id
+  subnet_id     = aws_subnet.public[0].id
+
+  tags = merge(
+    var.tags,
+    {
+      Name = "${var.project_prefix}-${var.environment}-nat-gw"
+    }
+  )
+
+  depends_on = [aws_internet_gateway.main]
+}
+
+# A single route table for all public subnets.
+resource "aws_route_table" "public" {
+  vpc_id = aws_vpc.main.id
+
+  route {
+    cidr_block = "0.0.0.0/0"
+    gateway_id = aws_internet_gateway.main.id
+  }
+
+  tags = merge(
+    var.tags,
+    {
+      Name = "${var.project_prefix}-${var.environment}-public-rt"
+    }
+  )
+}
+
+# Associate the single public route table with all public subnets.
+resource "aws_route_table_association" "public" {
+  count          = length(aws_subnet.public)
+  subnet_id      = aws_subnet.public[count.index].id
+  route_table_id = aws_route_table.public.id
+}
+
+# A single route table for ALL private subnets, pointing to the single NAT Gateway.
+resource "aws_route_table" "private" {
+  vpc_id = aws_vpc.main.id
+
+  route {
+    cidr_block     = "0.0.0.0/0"
+    nat_gateway_id = aws_nat_gateway.main.id
+  }
+
+  tags = merge(
+    var.tags,
+    {
+      Name = "${var.project_prefix}-${var.environment}-private-rt"
+    }
+  )
+}
+
+# Associate the single private route table with all private subnets.
+resource "aws_route_table_association" "private" {
+  count          = length(aws_subnet.private)
+  subnet_id      = aws_subnet.private[count.index].id
+  route_table_id = aws_route_table.private.id
+}
+
+# S3 Bucket for ALB Access Logs
+
+# This data source gets the AWS Account ID for the ELB service in the current region.
+data "aws_elb_service_account" "current" {}
+
+# This data source gets the current AWS Account ID for policy construction.
+data "aws_caller_identity" "current" {}
+
+# This is the primary bucket where the ALB will store its access logs.
+# Only create this bucket if logging is enabled.
+resource "aws_s3_bucket" "alb_access_logs" { #NOSONAR
+  count  = var.enable_alb_access_logs ? 1 : 0
+  bucket = var.alb_access_logs_bucket_name != "" ? var.alb_access_logs_bucket_name : "${var.project_prefix}-${var.environment}-alb-access-logs-${data.aws_caller_identity.current.account_id}"
+
+  tags = merge(
+    var.tags,
+    {
+      Name = "${var.project_prefix}-${var.environment}-alb-access-logs"
+    }
+  )
+}
+
+resource "aws_s3_bucket_public_access_block" "alb_access_logs" {
+  count  = var.enable_alb_access_logs ? 1 : 0
+  bucket = aws_s3_bucket.alb_access_logs[0].id
+
+  block_public_acls       = true
+  block_public_policy     = true
+  ignore_public_acls      = true
+  restrict_public_buckets = true
+}
+
+# This is a SECOND bucket, used to store the access logs FOR the first bucket.
+resource "aws_s3_bucket" "s3_server_access_logs" { #NOSONAR
+  count = var.enable_alb_access_logs ? 1 : 0
+
+  bucket = "${var.project_prefix}-${var.environment}-s3-access-logs-${data.aws_caller_identity.current.account_id}"
+
+  tags = merge(
+    var.tags,
+    {
+      Name = "${var.project_prefix}-${var.environment}-s3-server-access-logs"
+    }
+  )
+}
+
+resource "aws_s3_bucket_public_access_block" "s3_server_access_logs" {
+  count  = var.enable_alb_access_logs ? 1 : 0
+  bucket = aws_s3_bucket.s3_server_access_logs[0].id
+
+  block_public_acls       = true
+  block_public_policy     = true
+  ignore_public_acls      = true
+  restrict_public_buckets = true
+}
+
+# Bucket Versioning
+resource "aws_s3_bucket_versioning" "alb_access_logs" {
+  count  = var.enable_alb_access_logs ? 1 : 0
+  bucket = aws_s3_bucket.alb_access_logs[0].id
+
+  versioning_configuration {
+    status = "Enabled"
+  }
+}
+
+resource "aws_s3_bucket_versioning" "s3_server_access_logs" {
+  count  = var.enable_alb_access_logs ? 1 : 0
+  bucket = aws_s3_bucket.s3_server_access_logs[0].id
+
+  versioning_configuration {
+    status = "Enabled"
+  }
+}
+
+# Bucket Encryption
+
+resource "aws_s3_bucket_server_side_encryption_configuration" "alb_access_logs" {
+  count  = var.enable_alb_access_logs ? 1 : 0
+  bucket = aws_s3_bucket.alb_access_logs[0].id
+
+  rule {
+    apply_server_side_encryption_by_default {
+      sse_algorithm = "AES256"
+    }
+  }
+}
+
+resource "aws_s3_bucket_server_side_encryption_configuration" "s3_server_access_logs" {
+  count  = var.enable_alb_access_logs ? 1 : 0
+  bucket = aws_s3_bucket.s3_server_access_logs[0].id
+
+  rule {
+    apply_server_side_encryption_by_default {
+      sse_algorithm = "AES256"
+    }
+  }
+}
+
+# Bucket Lifecycle Policies
+resource "aws_s3_bucket_lifecycle_configuration" "alb_access_logs" {
+  count  = var.enable_alb_access_logs ? 1 : 0
+  bucket = aws_s3_bucket.alb_access_logs[0].id
+
+  rule {
+    id     = "expire-old-logs"
+    status = "Enabled"
+
+    transition {
+      days          = 30
+      storage_class = "STANDARD_IA"
+    }
+
+    transition {
+      days          = 90
+      storage_class = "GLACIER"
+    }
+
+    expiration {
+      days = 365
+    }
+
+    noncurrent_version_expiration {
+      noncurrent_days = 30
+    }
+  }
+}
+
+resource "aws_s3_bucket_lifecycle_configuration" "s3_server_access_logs" {
+  count  = var.enable_alb_access_logs ? 1 : 0
+  bucket = aws_s3_bucket.s3_server_access_logs[0].id
+
+  rule {
+    id     = "expire-old-logs"
+    status = "Enabled"
+
+    transition {
+      days          = 30
+      storage_class = "STANDARD_IA"
+    }
+
+    expiration {
+      days = 180
+    }
+
+    noncurrent_version_expiration {
+      noncurrent_days = 30
+    }
+  }
+}
+
+resource "aws_s3_bucket_logging" "s3_server_access_logs" {
+  count = var.enable_alb_access_logs ? 1 : 0
+
+  bucket = aws_s3_bucket.s3_server_access_logs[0].id
+
+  # The S3 server access logs bucket logs to itself under a separate prefix.
+  target_bucket = aws_s3_bucket.s3_server_access_logs[0].id
+  target_prefix = "self-logs/"
+}
+
+resource "aws_s3_bucket_logging" "alb_access_logs" {
+  count = var.enable_alb_access_logs ? 1 : 0
+
+  bucket = aws_s3_bucket.alb_access_logs[0].id
+
+  target_bucket = aws_s3_bucket.s3_server_access_logs[0].id
+  target_prefix = "alb-bucket-logs/"
+}
+
+# This data source constructs the required IAM policy document for the ALB log bucket.
+data "aws_iam_policy_document" "alb_access_logs" {
+  count = var.enable_alb_access_logs ? 1 : 0
+
+  # This statement allows the ALB service to write logs to the bucket.
+  statement {
+    sid       = "AllowALBToWriteLogs"
+    effect    = "Allow"
+    actions   = ["s3:PutObject"]
+    resources = ["${aws_s3_bucket.alb_access_logs[0].arn}/*"]
+
+    principals {
+      type        = "AWS"
+      identifiers = [data.aws_elb_service_account.current.arn]
+    }
+  }
+
+  statement {
+    sid       = "AllowALBToGetBucketACL"
+    effect    = "Allow"
+    actions   = ["s3:GetBucketAcl"]
+    resources = [aws_s3_bucket.alb_access_logs[0].arn]
+
+    principals {
+      type        = "AWS"
+      identifiers = [data.aws_elb_service_account.current.arn]
+    }
+  }
+
+  # This statement denies any access to the bucket over insecure HTTP.
+  statement {
+    sid     = "DenyInsecureTransport"
+    effect  = "Deny"
+    actions = ["s3:*"]
+    resources = [
+      aws_s3_bucket.alb_access_logs[0].arn,
+      "${aws_s3_bucket.alb_access_logs[0].arn}/*"
+    ]
+
+    principals {
+      type        = "*"
+      identifiers = ["*"]
+    }
+
+    condition {
+      test     = "Bool"
+      variable = "aws:SecureTransport"
+      values   = ["false"]
+    }
+  }
+}
+
+# Bucket policy for the S3 server access logs bucket: allows the S3 logging
+# service to write and enforces HTTPS-only access.
+data "aws_iam_policy_document" "s3_server_access_logs" {
+  count = var.enable_alb_access_logs ? 1 : 0
+
+  # Allow the S3 logging service to write logs.
+  statement {
+    sid    = "S3ServerAccessLogsPolicy"
+    effect = "Allow"
+    principals {
+      type        = "Service"
+      identifiers = ["logging.s3.amazonaws.com"]
+    }
+    actions = [
+      "s3:PutObject"
+    ]
+    resources = ["${aws_s3_bucket.s3_server_access_logs[0].arn}/*"]
+
+    condition {
+      test     = "StringEquals"
+      variable = "aws:SourceAccount"
+      values   = [data.aws_caller_identity.current.account_id]
+    }
+  }
+
+  # Deny any access over insecure HTTP.
+  statement {
+    sid     = "DenyInsecureTransport"
+    effect  = "Deny"
+    actions = ["s3:*"]
+    resources = [
+      aws_s3_bucket.s3_server_access_logs[0].arn,
+      "${aws_s3_bucket.s3_server_access_logs[0].arn}/*"
+    ]
+    principals {
+      type        = "*"
+      identifiers = ["*"]
+    }
+    condition {
+      test     = "Bool"
+      variable = "aws:SecureTransport"
+      values   = ["false"]
+    }
+  }
+}
+
+# Grants the ALB service permission to write to the access logs bucket.
+resource "aws_s3_bucket_policy" "alb_access_logs" {
+  count  = var.enable_alb_access_logs ? 1 : 0
+  bucket = aws_s3_bucket.alb_access_logs[0].id
+  policy = data.aws_iam_policy_document.alb_access_logs[0].json
+
+  depends_on = [
+    aws_s3_bucket_public_access_block.alb_access_logs
+  ]
+}
+
+# Attaches the HTTPS-only policy to the S3 server access logs bucket.
+resource "aws_s3_bucket_policy" "s3_server_access_logs" {
+  count  = var.enable_alb_access_logs ? 1 : 0
+  bucket = aws_s3_bucket.s3_server_access_logs[0].id
+  policy = data.aws_iam_policy_document.s3_server_access_logs[0].json
+
+  depends_on = [
+    aws_s3_bucket_public_access_block.s3_server_access_logs
+  ]
+}
+
+# Application Load Balancer
+
+resource "aws_security_group" "alb" {
+  name        = "${var.project_prefix}-${var.environment}-alb-sg"
+  description = "Controls access to the ALB"
+  vpc_id      = aws_vpc.main.id
+
+  ingress {
+    protocol    = "tcp"
+    from_port   = 80
+    to_port     = 80
+    cidr_blocks = ["0.0.0.0/0"]
+    description = "Allow HTTP traffic from anywhere for HTTPS redirection"
+  }
+
+  ingress {
+    protocol    = "tcp"
+    from_port   = 443
+    to_port     = 443
+    cidr_blocks = ["0.0.0.0/0"]
+    description = "Allow HTTPS traffic from anywhere"
+  }
+
+  egress {
+    protocol    = "-1"
+    from_port   = 0
+    to_port     = 0
+    cidr_blocks = ["0.0.0.0/0"]
+    description = "Allow all outbound traffic"
+  }
+
+  tags = merge(
+    var.tags,
+    {
+      Name = "${var.project_prefix}-${var.environment}-alb-sg"
+    }
+  )
+}
+
+resource "aws_lb" "main" {
+  name                       = "${var.project_prefix}-${var.environment}-alb"
+  internal                   = false
+  load_balancer_type         = "application"
+  security_groups            = [aws_security_group.alb.id]
+  subnets                    = aws_subnet.public[*].id
+  drop_invalid_header_fields = true
+
+  # Deletion protection is enabled automatically for the prod environment.
+  enable_deletion_protection = var.environment == "prod"
+
+  # Conditionally enable the access_logs block.
+  dynamic "access_logs" { #NOSONAR
+    for_each = var.enable_alb_access_logs ? [1] : []
+    content {
+      bucket  = aws_s3_bucket.alb_access_logs[0].bucket
+      enabled = true
+      prefix  = "alb-logs"
+    }
+  }
+
+  tags = merge(
+    var.tags,
+    {
+      Name = "${var.project_prefix}-${var.environment}-alb"
+    }
+  )
+
+  depends_on = [
+    aws_s3_bucket_policy.alb_access_logs
+  ]
+}
+
+resource "aws_lb_listener" "http" {
+  load_balancer_arn = aws_lb.main.arn
+  port              = 80
+  protocol          = "HTTP"
+
+  default_action {
+    type = "redirect"
+    redirect {
+      port        = "443"
+      protocol    = "HTTPS"
+      status_code = "HTTP_301"
+    }
+  }
+}
+
+resource "aws_lb_listener" "https" {
+  load_balancer_arn = aws_lb.main.arn
+  port              = 443
+  protocol          = "HTTPS"
+  ssl_policy        = "ELBSecurityPolicy-TLS13-1-2-2021-06"
+  certificate_arn   = var.acm_certificate_arn
+
+  default_action {
+    type = "fixed-response"
+    fixed_response {
+      content_type = "text/plain"
+      message_body = "404: Not Found. No listener rule configured for this path."
+      status_code  = "404"
+    }
+  }
+}
\ No newline at end of file
diff --git a/Terraform/modules/01-Network/outputs.tf b/Terraform/modules/01-Network/outputs.tf
new file mode 100644
index 0000000000..cf1bf4837e
--- /dev/null
+++ b/Terraform/modules/01-Network/outputs.tf
@@ -0,0 +1,29 @@
+output "vpc_id" {
+  description = "The ID of the created VPC."
+  value       = aws_vpc.main.id
+}
+
+output "public_subnet_ids" {
+  description = "A list of IDs for the public subnets."
+  value       = aws_subnet.public[*].id
+}
+
+output "private_subnet_ids" {
+  description = "A list of IDs for the private subnets."
+  value       = aws_subnet.private[*].id
+}
+
+output "alb_security_group_id" {
+  description = "The ID of the security group attached to the ALB."
+  value       = aws_security_group.alb.id
+}
+
+output "alb_dns_name" {
+  description = "The DNS name of the Application Load Balancer."
+  value       = aws_lb.main.dns_name
+}
+
+output "alb_https_listener_arn" {
+  description = "The ARN of the ALB's HTTPS listener."
+  value       = aws_lb_listener.https.arn
+}
\ No newline at end of file
diff --git a/Terraform/modules/01-Network/variables.tf b/Terraform/modules/01-Network/variables.tf
new file mode 100644
index 0000000000..fd7b09ae56
--- /dev/null
+++ b/Terraform/modules/01-Network/variables.tf
@@ -0,0 +1,70 @@
+variable "project_prefix" {
+  description = "A prefix used for naming all resources, e.g., 'owasp-nest'."
+  type        = string
+  validation {
+    condition     = can(regex("^[a-z0-9-]+$", var.project_prefix))
+    error_message = "The project_prefix must contain only lowercase letters, numbers, and hyphens."
+  }
+}
+
+variable "environment" {
+  description = "The deployment environment (e.g., 'dev', 'staging', 'prod')."
+  type        = string
+  validation {
+    condition     = contains(["dev", "staging", "prod"], var.environment)
+    error_message = "The environment must be one of: dev, staging, prod."
+  }
+}
+
+variable "vpc_cidr" {
+  description = "The CIDR block for the VPC."
+  type        = string
+  default     = "10.0.0.0/16"
+}
+
+variable "availability_zones" {
+  description = "A list of Availability Zones to deploy resources into. Must match the number of subnets (e.g., [\"us-east-1a\", \"us-east-1b\"])."
+  type        = list(string)
+}
+
+variable "public_subnet_cidrs" {
+  description = "A list of CIDR blocks for the public subnets. The number of CIDRs must match the number of availability_zones."
+  type        = list(string)
+  validation {
+    condition     = length(var.public_subnet_cidrs) > 0 && length(var.public_subnet_cidrs) == length(var.availability_zones)
+    error_message = "Provide at least one public subnet CIDR, and ensure its count matches availability_zones."
+  }
+}
+} +} + +variable "private_subnet_cidrs" { + description = "A list of CIDR blocks for the private subnets. The number of CIDRs must match the number of availability_zones." + type = list(string) + validation { + condition = length(var.private_subnet_cidrs) > 0 && length(var.private_subnet_cidrs) == length(var.availability_zones) + error_message = "Provide at least one private subnet CIDR, and ensure its count matches availability_zones." + } +} + +variable "acm_certificate_arn" { + description = "The ARN of the AWS Certificate Manager (ACM) certificate for the ALB's HTTPS listener." + type = string + # No default value, this must be provided by the root module. +} + +variable "tags" { + description = "A map of tags to apply to all resources." + type = map(string) + default = {} +} + +variable "enable_alb_access_logs" { + description = "Set to true to enable access logging for the Application Load Balancer." + type = bool + default = true +} + +variable "alb_access_logs_bucket_name" { + description = "The name of the S3 bucket to store ALB access logs. Must be globally unique. If left empty, a name will be generated." + type = string + default = "" +} \ No newline at end of file