feat: add support for EKS Auto Mode #253
base: main
Changes from all commits
```diff
@@ -14,6 +14,10 @@ locals {
   }
 
   cloudwatch_log_group_name = "/aws/eks/${module.label.id}/cluster"
 
+  node_role_arn_trimmed = can(trimspace(var.node_role_arn)) ? trimspace(var.node_role_arn) : ""
+
+  auto_mode_enabled = var.cluster_auto_mode_enabled
 }
 
 module "label" {
```
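These locals lean on variables declared elsewhere in the PR. A minimal sketch of plausible declarations (types, defaults, and descriptions here are assumptions, not code from this diff); note that `trimspace(null)` is an error in Terraform, which is what the `can()` guard turns into the empty-string fallback:

```hcl
# Hypothetical declarations for the variables the new locals reference.
variable "cluster_auto_mode_enabled" {
  type        = bool
  default     = false
  description = "Set true to enable EKS Auto Mode on the cluster."
}

variable "node_role_arn" {
  type        = string
  default     = null # trimspace(null) errors, so can() returns false and the local becomes ""
  description = "Existing IAM role ARN for Auto Mode node pools."
}

variable "node_pools" {
  type        = list(string)
  default     = []
  description = "Built-in Auto Mode node pools, e.g. [\"general-purpose\", \"system\"]."
}
```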
@@ -56,24 +60,50 @@ resource "aws_kms_alias" "cluster" { | |
resource "aws_eks_cluster" "default" { | ||
#bridgecrew:skip=BC_AWS_KUBERNETES_1:Allow permissive security group for public access, difficult to restrict without a VPN | ||
#bridgecrew:skip=BC_AWS_KUBERNETES_4:Let user decide on control plane logging, not necessary in non-production environments | ||
count = local.enabled ? 1 : 0 | ||
name = module.label.id | ||
tags = module.label.tags | ||
role_arn = local.eks_service_role_arn | ||
version = var.kubernetes_version | ||
enabled_cluster_log_types = var.enabled_cluster_log_types | ||
count = local.enabled ? 1 : 0 | ||
name = module.label.id | ||
tags = module.label.tags | ||
role_arn = local.eks_service_role_arn | ||
version = var.kubernetes_version | ||
enabled_cluster_log_types = var.enabled_cluster_log_types | ||
# Enabling EKS Auto Mode also requires that bootstrap_self_managed_addons is set to false | ||
bootstrap_self_managed_addons = var.bootstrap_self_managed_addons_enabled | ||
|
||
access_config { | ||
authentication_mode = var.access_config.authentication_mode | ||
bootstrap_cluster_creator_admin_permissions = var.access_config.bootstrap_cluster_creator_admin_permissions | ||
} | ||
|
||
# EKS Auto Mode | ||
dynamic "compute_config" { | ||
for_each = local.auto_mode_enabled ? [true] : [] | ||
|
||
content { | ||
enabled = true | ||
|
||
# Only set if both node_pools and node_role_arn are passed | ||
node_pools = (var.node_pools != null && length(var.node_pools) > 0) ? var.node_pools : null | ||
node_role_arn = (var.node_pools != null && length(var.node_pools) > 0) ? local.node_role_arn : null | ||
} | ||
} | ||
|
||
lifecycle { | ||
# bootstrap_cluster_creator_admin_permissions is documented as only applying | ||
# to the initial creation of the cluster, and being unreliable afterward, | ||
# so we want to ignore it except at cluster creation time. | ||
ignore_changes = [access_config[0].bootstrap_cluster_creator_admin_permissions] | ||
precondition { | ||
condition = !(local.auto_mode_enabled && var.bootstrap_self_managed_addons_enabled) | ||
error_message = "EKS Auto Mode cannot be enabled at the same time as bootstrap_self_managed_addons. Please disable one of them." | ||
} | ||
precondition { | ||
condition = ( | ||
var.create_node_role || | ||
length(var.node_pools) == 0 || | ||
local.node_role_arn_trimmed != "" | ||
) | ||
error_message = "If create_node_role is false and node_pools is set, node_role_arn must also be provided." | ||
} | ||
} | ||
|
||
dynamic "encryption_config" { | ||
|
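Taken together, the two preconditions pin down how the new inputs combine: Auto Mode requires the self-managed add-on bootstrap to be off, and custom node pools need a node role from somewhere. A usage sketch showing only the inputs visible in this diff (the module source is illustrative, and everything else a real caller needs is omitted):

```hcl
module "eks_cluster" {
  source = "cloudposse/eks-cluster/aws" # illustrative source; use the actual module path

  cluster_auto_mode_enabled             = true
  bootstrap_self_managed_addons_enabled = false # must be false when Auto Mode is on (first precondition)

  # Either let the module create the node role ...
  create_node_role = true
  node_pools       = ["general-purpose", "system"]

  # ... or set create_node_role = false and pass node_role_arn instead
  # (second precondition).

  # Required networking and label inputs omitted for brevity.
}
```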
```diff
@@ -98,18 +128,43 @@ resource "aws_eks_cluster" "default" {
 
   dynamic "kubernetes_network_config" {
     for_each = local.use_ipv6 ? [] : compact([var.service_ipv4_cidr])
 
     content {
       service_ipv4_cidr = kubernetes_network_config.value
     }
   }
 
   dynamic "kubernetes_network_config" {
     for_each = local.use_ipv6 ? [true] : []
 
     content {
       ip_family = "ipv6"
     }
   }
 
+  dynamic "kubernetes_network_config" {
+    for_each = local.auto_mode_enabled ? [true] : []
+
+    content {
+      dynamic "elastic_load_balancing" {
+        for_each = local.auto_mode_enabled ? [true] : []
+        content {
+          enabled = true
+        }
+      }
+    }
+  }
```

Review comment on the inner `dynamic "elastic_load_balancing"` block: I believe this dynamic block is excessive. If auto_mode is not enabled, this network config won't be added to the resource.

Author's reply: Thanks for the comment! I believe it should still work fine — even if […]
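For reference, the simplification the reviewer appears to be suggesting: since the outer `for_each` already gates the whole `kubernetes_network_config` block on `local.auto_mode_enabled`, the inner dynamic can become a plain block (a sketch, not code from this PR):

```hcl
# Inside resource "aws_eks_cluster" "default"
dynamic "kubernetes_network_config" {
  for_each = local.auto_mode_enabled ? [true] : []

  content {
    # The outer for_each already guarantees Auto Mode is enabled here,
    # so the block can be emitted unconditionally.
    elastic_load_balancing {
      enabled = true
    }
  }
}
```

When Auto Mode is disabled, neither form emits the block at all, so the two are behaviorally identical; the inner dynamic is redundant but harmless.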
```diff
+  dynamic "storage_config" {
+    for_each = local.auto_mode_enabled ? [true] : []
+
+    content {
+      block_storage {
+        enabled = true
+      }
+    }
+  }
 
   dynamic "upgrade_policy" {
     for_each = var.upgrade_policy != null ? [var.upgrade_policy] : []
     content {
```
```diff
@@ -126,9 +181,14 @@ resource "aws_eks_cluster" "default" {
 
   depends_on = [
     aws_iam_role.default,
+    aws_iam_role.node,
     aws_iam_role_policy_attachment.cluster_elb_service_role,
     aws_iam_role_policy_attachment.amazon_eks_cluster_policy,
     aws_iam_role_policy_attachment.amazon_eks_service_policy,
+    aws_iam_role_policy_attachment.auto_mode_policies,
+    aws_iam_role_policy_attachment.amazon_ec2_container_registry_read_only,
+    aws_iam_role_policy_attachment.amazon_eks_cni_policy,
+    aws_iam_role_policy_attachment.amazon_eks_worker_node_policy,
     aws_kms_alias.cluster,
     aws_cloudwatch_log_group.default,
     var.associated_security_group_ids,
```
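The expanded `depends_on` list references `aws_iam_role.node` and `aws_iam_role_policy_attachment.auto_mode_policies`, both defined elsewhere in the PR. A rough sketch of what the latter plausibly looks like, assuming it attaches the AWS-managed policies that Auto Mode requires on the cluster role (the resource body, policy list, and role reference are all assumptions, not code from this diff):

```hcl
# Hypothetical shape of the attachment referenced in depends_on above.
resource "aws_iam_role_policy_attachment" "auto_mode_policies" {
  for_each = local.auto_mode_enabled ? toset([
    "arn:aws:iam::aws:policy/AmazonEKSComputePolicy",
    "arn:aws:iam::aws:policy/AmazonEKSBlockStoragePolicy",
    "arn:aws:iam::aws:policy/AmazonEKSLoadBalancingPolicy",
    "arn:aws:iam::aws:policy/AmazonEKSNetworkingPolicy",
  ]) : toset([])

  policy_arn = each.value
  role       = aws_iam_role.default[0].name
}
```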
Review comment: Note this variable is declared but unused.