2 changes: 1 addition & 1 deletion examples/complete/fixtures.us-east-2.tfvars
@@ -28,7 +28,7 @@ kubernetes_labels = {}
cluster_encryption_config_enabled = true

# When updating the Kubernetes version, also update the API and client-go version in test/src/go.mod
- kubernetes_version = "1.29"
+ kubernetes_version = "1.32"

private_ipv6_enabled = false

8 changes: 8 additions & 0 deletions examples/complete/main.tf
@@ -20,6 +20,8 @@ data "aws_iam_session_context" "current" {
locals {
enabled = module.this.enabled

auto_mode_enabled = var.cluster_auto_mode_enabled

private_ipv6_enabled = var.private_ipv6_enabled

# The usage of the specific kubernetes.io/cluster/* resource tags below are required
@@ -116,6 +118,11 @@ module "eks_cluster" {
upgrade_policy = var.upgrade_policy
zonal_shift_config = var.zonal_shift_config

cluster_auto_mode_enabled = local.auto_mode_enabled

create_node_role = var.create_node_role
node_pools = var.node_pools

access_entry_map = local.access_entry_map
access_config = {
authentication_mode = "API"
@@ -135,6 +142,7 @@
}

module "eks_node_group" {
enabled = local.enabled && !local.auto_mode_enabled # skip the managed node group when EKS Auto Mode is enabled
source = "cloudposse/eks-node-group/aws"
version = "3.2.0"

28 changes: 27 additions & 1 deletion examples/complete/variables.tf
@@ -1,4 +1,5 @@
variable "region" {

type = string
description = "AWS Region"
}
@@ -10,7 +11,7 @@

variable "kubernetes_version" {
type = string
- default = "1.29"
+ default = "1.32"
description = "Desired Kubernetes master version. If you do not specify a value, the latest available version is used"
}

@@ -136,3 +137,28 @@
default = false
description = "Whether to use IPv6 addresses for the pods in the node group"
}

variable "cluster_auto_mode_enabled" {
type = bool
default = false
description = "Set to true to enable EKS Auto Mode"
}

variable "node_pools" {
type = list(string)
description = "Node pools for EKS Auto Mode. Valid values are 'general-purpose' and 'system'. Optional."
default = []
}

variable "create_node_role" {
type = bool
description = "Set to false to use an existing node_role_arn instead of creating one"
default = true
}


variable "node_role_arn" {

Check warning on line 160 in examples/complete/variables.tf (GitHub Actions / terraform-module / CI / Lint, ./examples/complete):
[tflint] reported by reviewdog 🐶 variable "node_role_arn" is declared but not used (variables.tf:160:1)

Reviewer (Member): Note this variable is declared but unused.

type = string
description = "ARN of the node IAM role for Auto Mode. Required if node_pools is set."
default = null
}
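
A possible follow-up for the tflint warning above: pass the declared variable through to the module call in examples/complete/main.tf. A minimal sketch, assuming the root module's node_role_arn input (referenced in iam.tf below) and the example's usual local source path:

module "eks_cluster" {
  source = "../../" # assumed local path used by the example

  # ...existing example inputs...

  cluster_auto_mode_enabled = var.cluster_auto_mode_enabled
  create_node_role          = var.create_node_role
  node_pools                = var.node_pools
  node_role_arn             = var.node_role_arn # wires the otherwise-unused variable through
}
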
62 changes: 61 additions & 1 deletion iam.tf
@@ -1,15 +1,24 @@
locals {
create_eks_service_role = local.enabled && var.create_eks_service_role
create_node_role = local.enabled && var.create_node_role

eks_service_role_arn = local.create_eks_service_role ? one(aws_iam_role.default[*].arn) : var.eks_cluster_service_role_arn
node_role_arn = local.create_node_role ? one(aws_iam_role.node[*].arn) : var.node_role_arn

auto_mode_policies = [
"arn:aws:iam::aws:policy/AmazonEKSBlockStoragePolicy",
"arn:aws:iam::aws:policy/AmazonEKSComputePolicy",
"arn:aws:iam::aws:policy/AmazonEKSLoadBalancingPolicy",
"arn:aws:iam::aws:policy/AmazonEKSNetworkingPolicy"
]
}

data "aws_iam_policy_document" "assume_role" {
count = local.create_eks_service_role ? 1 : 0

statement {
effect = "Allow"
- actions = ["sts:AssumeRole"]
+ actions = concat(["sts:AssumeRole"], local.auto_mode_enabled ? ["sts:TagSession"] : [])

principals {
type = "Service"
@@ -41,6 +50,7 @@ resource "aws_iam_role_policy_attachment" "amazon_eks_service_policy" {
role = one(aws_iam_role.default[*].name)
}


# AmazonEKSClusterPolicy managed policy doesn't contain all necessary permissions to create
# ELB service-linked role required during LB provisioning by Kubernetes.
# Because of that, on a new AWS account (where load balancers have not been provisioned yet, `nginx-ingress` fails to provision a load balancer
@@ -88,3 +98,53 @@ resource "aws_iam_role_policy_attachment" "cluster_elb_service_role" {
policy_arn = one(aws_iam_policy.cluster_elb_service_role[*].arn)
role = one(aws_iam_role.default[*].name)
}

resource "aws_iam_role_policy_attachment" "auto_mode_policies" {
count = local.auto_mode_enabled && local.create_eks_service_role ? length(local.auto_mode_policies) : 0
policy_arn = element(local.auto_mode_policies, count.index)
role = one(aws_iam_role.default[*].name)
}

data "aws_iam_policy_document" "node_assume_role" {
count = local.create_node_role ? 1 : 0

statement {
effect = "Allow"
actions = ["sts:AssumeRole"]

principals {
type = "Service"
identifiers = ["ec2.amazonaws.com"]
}
}
}

resource "aws_iam_role" "node" {
count = local.create_node_role ? 1 : 0

name = "${module.label.id}-node"
assume_role_policy = one(data.aws_iam_policy_document.node_assume_role[*].json)
tags = module.label.tags
permissions_boundary = var.permissions_boundary
}

resource "aws_iam_role_policy_attachment" "amazon_eks_worker_node_policy" {
count = local.create_node_role ? 1 : 0

policy_arn = format("arn:%s:iam::aws:policy/AmazonEKSWorkerNodePolicy", one(data.aws_partition.current[*].partition))
role = one(aws_iam_role.node[*].name)
}

resource "aws_iam_role_policy_attachment" "amazon_ec2_container_registry_read_only" {
count = local.create_node_role ? 1 : 0

policy_arn = format("arn:%s:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", one(data.aws_partition.current[*].partition))
role = one(aws_iam_role.node[*].name)
}

resource "aws_iam_role_policy_attachment" "amazon_eks_cni_policy" {
count = local.create_node_role ? 1 : 0

policy_arn = format("arn:%s:iam::aws:policy/AmazonEKS_CNI_Policy", one(data.aws_partition.current[*].partition))
role = one(aws_iam_role.node[*].name)
}
72 changes: 66 additions & 6 deletions main.tf
@@ -14,6 +14,10 @@ locals {
}

cloudwatch_log_group_name = "/aws/eks/${module.label.id}/cluster"

node_role_arn_trimmed = can(trimspace(var.node_role_arn)) ? trimspace(var.node_role_arn) : ""

auto_mode_enabled = var.cluster_auto_mode_enabled
}

module "label" {
@@ -56,24 +60,50 @@ resource "aws_kms_alias" "cluster" {
resource "aws_eks_cluster" "default" {
#bridgecrew:skip=BC_AWS_KUBERNETES_1:Allow permissive security group for public access, difficult to restrict without a VPN
#bridgecrew:skip=BC_AWS_KUBERNETES_4:Let user decide on control plane logging, not necessary in non-production environments
-  count                     = local.enabled ? 1 : 0
-  name                      = module.label.id
-  tags                      = module.label.tags
-  role_arn                  = local.eks_service_role_arn
-  version                   = var.kubernetes_version
-  enabled_cluster_log_types = var.enabled_cluster_log_types
+  count                         = local.enabled ? 1 : 0
+  name                          = module.label.id
+  tags                          = module.label.tags
+  role_arn                      = local.eks_service_role_arn
+  version                       = var.kubernetes_version
+  enabled_cluster_log_types     = var.enabled_cluster_log_types
+  # Enabling EKS Auto Mode also requires that bootstrap_self_managed_addons is set to false
+  bootstrap_self_managed_addons = var.bootstrap_self_managed_addons_enabled

access_config {
authentication_mode = var.access_config.authentication_mode
bootstrap_cluster_creator_admin_permissions = var.access_config.bootstrap_cluster_creator_admin_permissions
}

# EKS Auto Mode
dynamic "compute_config" {
for_each = local.auto_mode_enabled ? [true] : []

content {
enabled = true

# Only set if both node_pools and node_role_arn are passed
node_pools = (var.node_pools != null && length(var.node_pools) > 0) ? var.node_pools : null
node_role_arn = (var.node_pools != null && length(var.node_pools) > 0) ? local.node_role_arn : null
}
}

lifecycle {
# bootstrap_cluster_creator_admin_permissions is documented as only applying
# to the initial creation of the cluster, and being unreliable afterward,
# so we want to ignore it except at cluster creation time.
ignore_changes = [access_config[0].bootstrap_cluster_creator_admin_permissions]
precondition {
condition = !(local.auto_mode_enabled && var.bootstrap_self_managed_addons_enabled)
error_message = "EKS Auto Mode cannot be enabled at the same time as bootstrap_self_managed_addons. Please disable one of them."
}
precondition {
condition = (
var.create_node_role ||
length(var.node_pools) == 0 ||
local.node_role_arn_trimmed != ""
)
error_message = "If create_node_role is false and node_pools is set, node_role_arn must also be provided."
}
}

dynamic "encryption_config" {
@@ -98,18 +128,43 @@ resource "aws_eks_cluster" "default" {

dynamic "kubernetes_network_config" {
for_each = local.use_ipv6 ? [] : compact([var.service_ipv4_cidr])

content {
service_ipv4_cidr = kubernetes_network_config.value
}
}

dynamic "kubernetes_network_config" {
for_each = local.use_ipv6 ? [true] : []

content {
ip_family = "ipv6"
}
}

dynamic "kubernetes_network_config" {
for_each = local.auto_mode_enabled ? [true] : []

content {
dynamic "elastic_load_balancing" {

Reviewer comment: I believe this dynamic block is excessive. If auto_mode is not enabled, this network config won't be added to the resource.

Author reply (@romulofranca, Jun 12, 2025): Thanks for the comment! I believe it should still work fine; even if auto_mode_enabled is false, the other kubernetes_network_config blocks (like IPv4 or IPv6) might still be added if their conditions are met. Each block runs independently, so they don't block each other. Let me know if you see anything I might've missed!

(A sketch of the suggested simplification follows this kubernetes_network_config block below.)

for_each = local.auto_mode_enabled ? [true] : []
content {
enabled = true
}
}
}
}
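
A minimal sketch of the simplification the reviewer appears to suggest (illustrative only, not part of this PR): since this kubernetes_network_config block is already gated on local.auto_mode_enabled, the inner dynamic "elastic_load_balancing" wrapper could be replaced with a plain nested block:

dynamic "kubernetes_network_config" {
  for_each = local.auto_mode_enabled ? [true] : []

  content {
    elastic_load_balancing {
      enabled = true
    }
  }
}
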

dynamic "storage_config" {
for_each = local.auto_mode_enabled ? [true] : []

content {
block_storage {
enabled = true
}
}
}

dynamic "upgrade_policy" {
for_each = var.upgrade_policy != null ? [var.upgrade_policy] : []
content {
@@ -126,9 +181,14 @@

depends_on = [
aws_iam_role.default,
aws_iam_role.node,
aws_iam_role_policy_attachment.cluster_elb_service_role,
aws_iam_role_policy_attachment.amazon_eks_cluster_policy,
aws_iam_role_policy_attachment.amazon_eks_service_policy,
aws_iam_role_policy_attachment.auto_mode_policies,
aws_iam_role_policy_attachment.amazon_ec2_container_registry_read_only,
aws_iam_role_policy_attachment.amazon_eks_cni_policy,
aws_iam_role_policy_attachment.amazon_eks_worker_node_policy,
aws_kms_alias.cluster,
aws_cloudwatch_log_group.default,
var.associated_security_group_ids,
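
For context, a hypothetical caller enabling EKS Auto Mode with a pre-existing node role, consistent with the preconditions added above (the registry source, version pin, and omitted inputs are placeholders, not part of this PR):

module "eks_cluster" {
  source = "cloudposse/eks-cluster/aws" # placeholder source; pin a version as appropriate

  cluster_auto_mode_enabled             = true
  bootstrap_self_managed_addons_enabled = false # Auto Mode requires bootstrap self-managed add-ons to be disabled
  node_pools                            = ["general-purpose", "system"]
  create_node_role                      = false
  node_role_arn                         = "arn:aws:iam::111111111111:role/example-eks-auto-node"

  # ...plus the usual cluster inputs (subnet_ids, kubernetes_version, etc.)...
}
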
5 changes: 5 additions & 0 deletions outputs.tf
@@ -99,3 +99,8 @@ output "cloudwatch_log_group_kms_key_id" {
description = "KMS Key ID to encrypt AWS CloudWatch logs"
value = var.cloudwatch_log_group_kms_key_id
}

output "node_role_arn" {
value = local.node_role_arn
description = "The ARN of the node IAM role being used (created or provided)."
}
71 changes: 71 additions & 0 deletions test/src/examples_complete_test.go
@@ -213,3 +213,74 @@ func TestExamplesCompleteDisabled(t *testing.T) {
match := re.FindString(results)
assert.Equal(t, "Resources: 0 added, 0 changed, 0 destroyed.", match, "Applying with enabled=false should not create any resources")
}

func TestExamplesAutoMode(t *testing.T) {
// Generate a random ID for resource uniqueness
randId := strings.ToLower(random.UniqueId())
attributes := []string{randId}

// Configure Terraform options for the test
terraformOptions := &terraform.Options{
TerraformDir: "../../examples/complete",
Upgrade: true,
VarFiles: []string{"fixtures.us-east-2.tfvars"},
Vars: map[string]interface{}{
"attributes": attributes,
"cluster_auto_mode_enabled": true, // Enable EKS auto mode
"bootstrap_self_managed_addons_enabled": false, // Disable bootstrap addons
"create_node_role": true, // Create node IAM role
"node_pools": []string{"system", "general-purpose"},// Define node pools
},
}

// Ensure resources are destroyed on test crash or completion
defer runtime.HandleCrash(func(i interface{}) {
terraform.Destroy(t, terraformOptions)
})
defer terraform.Destroy(t, terraformOptions)

// Initialize and apply Terraform configuration
terraform.InitAndApply(t, terraformOptions)

// Validate VPC CIDR output
vpcCidr := terraform.Output(t, terraformOptions, "vpc_cidr")
assert.Equal(t, "172.16.0.0/16", vpcCidr)

// Validate private subnet CIDRs
privateSubnetCidrs := terraform.OutputList(t, terraformOptions, "private_subnet_cidrs")
assert.Equal(t, []string{"172.16.0.0/19", "172.16.32.0/19"}, privateSubnetCidrs)

// Validate public subnet CIDRs
publicSubnetCidrs := terraform.OutputList(t, terraformOptions, "public_subnet_cidrs")
assert.Equal(t, []string{"172.16.96.0/19", "172.16.128.0/19"}, publicSubnetCidrs)

// Validate EKS cluster ID output
eksClusterId := terraform.Output(t, terraformOptions, "eks_cluster_id")
assert.Equal(t, "eg-test-eks-"+randId+"-cluster", eksClusterId)

// In auto mode, node group outputs should be empty
eksNodeGroupId := terraform.Output(t, terraformOptions, "eks_node_group_id")
assert.Equal(t, "", eksNodeGroupId)

eksNodeGroupRoleName := terraform.Output(t, terraformOptions, "eks_node_group_role_name")
assert.Equal(t, "", eksNodeGroupRoleName)

eksNodeGroupStatus := terraform.Output(t, terraformOptions, "eks_node_group_status")
assert.Equal(t, "", eksNodeGroupStatus)

// Create AWS session for EKS API calls
sess := session.Must(session.NewSession(&aws.Config{
Region: aws.String("us-east-2"),
}))

// Describe the EKS cluster to verify its status
eksSvc := eks.New(sess)
input := &eks.DescribeClusterInput{
Name: aws.String("eg-test-eks-" + randId + "-cluster"),
}
result, err := eksSvc.DescribeCluster(input)
assert.NoError(t, err)
assert.Equal(t, "ACTIVE", aws.StringValue(result.Cluster.Status), "Expected EKS cluster status to be 'ACTIVE', but got '%s'", aws.StringValue(result.Cluster.Status))

fmt.Println("EKS cluster is available (Auto Mode)")
}