# Forked from terraform-aws-modules/terraform-aws-eks

data "aws_partition" "current" {}
data "aws_caller_identity" "current" {}
locals {
create = var.create && var.putin_khuylo
cluster_role = try(aws_iam_role.this[0].arn, var.iam_role_arn)
}

################################################################################
# Cluster
################################################################################

resource "aws_eks_cluster" "this" {
  count = local.create ? 1 : 0

  name                      = var.cluster_name
  role_arn                  = local.cluster_role
  version                   = var.cluster_version
  enabled_cluster_log_types = var.cluster_enabled_log_types

  vpc_config {
    security_group_ids      = compact(distinct(concat(var.cluster_additional_security_group_ids, [local.cluster_security_group_id])))
    subnet_ids              = coalescelist(var.control_plane_subnet_ids, var.subnet_ids)
    endpoint_private_access = var.cluster_endpoint_private_access
    endpoint_public_access  = var.cluster_endpoint_public_access
    public_access_cidrs     = var.cluster_endpoint_public_access_cidrs
  }

  kubernetes_network_config {
    ip_family         = var.cluster_ip_family
    service_ipv4_cidr = var.cluster_service_ipv4_cidr
  }

  dynamic "encryption_config" {
    for_each = toset(var.cluster_encryption_config)

    content {
      provider {
        key_arn = var.create_kms_key ? module.kms.key_arn : encryption_config.value.provider_key_arn
      }
      resources = encryption_config.value.resources
    }
  }

  tags = merge(
    var.tags,
    var.cluster_tags,
  )

  timeouts {
    create = lookup(var.cluster_timeouts, "create", null)
    update = lookup(var.cluster_timeouts, "update", null)
    delete = lookup(var.cluster_timeouts, "delete", null)
  }

  depends_on = [
    aws_iam_role_policy_attachment.this,
    aws_security_group_rule.cluster,
    aws_security_group_rule.node,
    aws_cloudwatch_log_group.this
  ]
}
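
# Illustrative only (not part of the module code): a typical shape for the
# `cluster_encryption_config` input consumed by the dynamic block above when
# `create_kms_key = false`. The key ARN is a placeholder.
#
#   cluster_encryption_config = [
#     {
#       provider_key_arn = "arn:aws:kms:eu-west-1:111122223333:key/<key-id>"
#       resources        = ["secrets"]
#     }
#   ]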
resource "aws_ec2_tag" "cluster_primary_security_group" {
# This should not affect the name of the cluster primary security group
# Ref: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2006
# Ref: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2008
for_each = { for k, v in merge(var.tags, var.cluster_tags) : k => v if local.create && k != "Name" && var.create_cluster_primary_security_group_tags }
resource_id = aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id
key = each.key
value = each.value
}
resource "aws_cloudwatch_log_group" "this" {
count = local.create && var.create_cloudwatch_log_group ? 1 : 0
name = "/aws/eks/${var.cluster_name}/cluster"
retention_in_days = var.cloudwatch_log_group_retention_in_days
kms_key_id = var.cloudwatch_log_group_kms_key_id
tags = var.tags
}

################################################################################
# KMS Key
################################################################################

module "kms" {
  source  = "terraform-aws-modules/kms/aws"
  version = "1.0.2" # Note - be mindful of Terraform/provider version compatibility between modules

  create = local.create && var.create_kms_key

  description             = coalesce(var.kms_key_description, "${var.cluster_name} cluster encryption key")
  key_usage               = "ENCRYPT_DECRYPT"
  deletion_window_in_days = var.kms_key_deletion_window_in_days
  enable_key_rotation     = var.enable_kms_key_rotation

  # Policy
  enable_default_policy     = var.kms_key_enable_default_policy
  key_owners                = var.kms_key_owners
  key_administrators        = coalescelist(var.kms_key_administrators, [data.aws_caller_identity.current.arn])
  key_users                 = concat([local.cluster_role], var.kms_key_users)
  key_service_users         = var.kms_key_service_users
  source_policy_documents   = var.kms_key_source_policy_documents
  override_policy_documents = var.kms_key_override_policy_documents

  # Aliases
  aliases = concat(["eks/${var.cluster_name}"], var.kms_key_aliases)

  tags = var.tags
}
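
# Illustrative only: when `create_kms_key = true` this module-managed key is wired
# into the cluster encryption config above, so callers can omit `provider_key_arn`.
# A minimal sketch of such inputs (account ID and role name are placeholders):
#
#   create_kms_key                  = true
#   kms_key_deletion_window_in_days = 7
#   kms_key_administrators          = ["arn:aws:iam::111122223333:role/platform-admin"]
#   cluster_encryption_config = [{
#     resources = ["secrets"]
#   }]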

################################################################################
# Cluster Security Group
# Defaults follow https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html
################################################################################

locals {
  cluster_sg_name   = coalesce(var.cluster_security_group_name, "${var.cluster_name}-cluster")
  create_cluster_sg = local.create && var.create_cluster_security_group

  cluster_security_group_id = local.create_cluster_sg ? aws_security_group.cluster[0].id : var.cluster_security_group_id

  cluster_security_group_rules = {
    ingress_nodes_443 = {
      description                = "Node groups to cluster API"
      protocol                   = "tcp"
      from_port                  = 443
      to_port                    = 443
      type                       = "ingress"
      source_node_security_group = true
    }
    egress_nodes_443 = {
      description                = "Cluster API to node groups"
      protocol                   = "tcp"
      from_port                  = 443
      to_port                    = 443
      type                       = "egress"
      source_node_security_group = true
    }
    egress_nodes_kubelet = {
      description                = "Cluster API to node kubelets"
      protocol                   = "tcp"
      from_port                  = 10250
      to_port                    = 10250
      type                       = "egress"
      source_node_security_group = true
    }
  }
}
resource "aws_security_group" "cluster" {
count = local.create_cluster_sg ? 1 : 0
name = var.cluster_security_group_use_name_prefix ? null : local.cluster_sg_name
name_prefix = var.cluster_security_group_use_name_prefix ? "${local.cluster_sg_name}${var.prefix_separator}" : null
description = var.cluster_security_group_description
vpc_id = var.vpc_id
tags = merge(
var.tags,
{ "Name" = local.cluster_sg_name },
var.cluster_security_group_tags
)
lifecycle {
create_before_destroy = true
}
}
resource "aws_security_group_rule" "cluster" {
for_each = { for k, v in merge(local.cluster_security_group_rules, var.cluster_security_group_additional_rules) : k => v if local.create_cluster_sg }
# Required
security_group_id = aws_security_group.cluster[0].id
protocol = each.value.protocol
from_port = each.value.from_port
to_port = each.value.to_port
type = each.value.type
# Optional
description = try(each.value.description, null)
cidr_blocks = try(each.value.cidr_blocks, null)
ipv6_cidr_blocks = try(each.value.ipv6_cidr_blocks, null)
prefix_list_ids = try(each.value.prefix_list_ids, [])
self = try(each.value.self, null)
source_security_group_id = try(
each.value.source_security_group_id,
try(each.value.source_node_security_group, false) ? local.node_security_group_id : null
)
}
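
# Illustrative only: rules passed via `cluster_security_group_additional_rules`
# are merged with the defaults above and use the same attribute schema the rule
# resource reads. The CIDR block is a placeholder.
#
#   cluster_security_group_additional_rules = {
#     ingress_office_https = {
#       description = "Office network to cluster API"
#       protocol    = "tcp"
#       from_port   = 443
#       to_port     = 443
#       type        = "ingress"
#       cidr_blocks = ["10.10.0.0/16"]
#     }
#   }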

################################################################################
# IRSA
# Note - this is different from EKS identity provider
################################################################################

data "tls_certificate" "this" {
  count = local.create && var.enable_irsa ? 1 : 0

  url = aws_eks_cluster.this[0].identity[0].oidc[0].issuer
}

resource "aws_iam_openid_connect_provider" "oidc_provider" {
  count = local.create && var.enable_irsa ? 1 : 0

  client_id_list  = distinct(compact(concat(["sts.${local.dns_suffix}"], var.openid_connect_audiences)))
  thumbprint_list = concat([data.tls_certificate.this[0].certificates[0].sha1_fingerprint], var.custom_oidc_thumbprints)
  url             = aws_eks_cluster.this[0].identity[0].oidc[0].issuer

  tags = merge(
    { Name = "${var.cluster_name}-eks-irsa" },
    var.tags
  )
}
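
# Illustrative only: service-account roles for IRSA trust this OIDC provider.
# A minimal sketch of such a trust policy for a hypothetical `my-app` service
# account in the `default` namespace; in practice consumers reference the
# provider through the module's outputs rather than these resources directly.
#
#   data "aws_iam_policy_document" "my_app_assume" {
#     statement {
#       actions = ["sts:AssumeRoleWithWebIdentity"]
#
#       principals {
#         type        = "Federated"
#         identifiers = [aws_iam_openid_connect_provider.oidc_provider[0].arn]
#       }
#
#       condition {
#         test     = "StringEquals"
#         variable = "${replace(aws_eks_cluster.this[0].identity[0].oidc[0].issuer, "https://", "")}:sub"
#         values   = ["system:serviceaccount:default:my-app"]
#       }
#     }
#   }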

################################################################################
# IAM Role
################################################################################

locals {
  create_iam_role                = local.create && var.create_iam_role
  iam_role_name                  = coalesce(var.iam_role_name, "${var.cluster_name}-cluster")
  policy_arn_prefix              = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
  cluster_encryption_policy_name = coalesce(var.cluster_encryption_policy_name, "${local.iam_role_name}-ClusterEncryption")

  # TODO - hopefully this can be removed once the AWS endpoint is named properly in China
  # https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1904
  dns_suffix = coalesce(var.cluster_iam_role_dns_suffix, data.aws_partition.current.dns_suffix)
}

data "aws_iam_policy_document" "assume_role_policy" {
  count = local.create && var.create_iam_role ? 1 : 0

  statement {
    sid     = "EKSClusterAssumeRole"
    actions = ["sts:AssumeRole"]

    principals {
      type        = "Service"
      identifiers = ["eks.${local.dns_suffix}"]
    }
  }
}
resource "aws_iam_role" "this" {
count = local.create_iam_role ? 1 : 0
name = var.iam_role_use_name_prefix ? null : local.iam_role_name
name_prefix = var.iam_role_use_name_prefix ? "${local.iam_role_name}${var.prefix_separator}" : null
path = var.iam_role_path
description = var.iam_role_description
assume_role_policy = data.aws_iam_policy_document.assume_role_policy[0].json
permissions_boundary = var.iam_role_permissions_boundary
force_detach_policies = true
# https://github.com/terraform-aws-modules/terraform-aws-eks/issues/920
# Resources running on the cluster are still generaring logs when destroying the module resources
# which results in the log group being re-created even after Terraform destroys it. Removing the
# ability for the cluster role to create the log group prevents this log group from being re-created
# outside of Terraform due to services still generating logs during destroy process
dynamic "inline_policy" {
for_each = var.create_cloudwatch_log_group ? [1] : []
content {
name = local.iam_role_name
policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Action = ["logs:CreateLogGroup"]
Effect = "Deny"
Resource = aws_cloudwatch_log_group.this[0].arn
},
]
})
}
}
tags = merge(var.tags, var.iam_role_tags)
}

# Policies attached ref https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html
resource "aws_iam_role_policy_attachment" "this" {
  for_each = local.create_iam_role ? toset(compact(distinct(concat([
    "${local.policy_arn_prefix}/AmazonEKSClusterPolicy",
    "${local.policy_arn_prefix}/AmazonEKSVPCResourceController",
  ], var.iam_role_additional_policies)))) : toset([])

  policy_arn = each.value
  role       = aws_iam_role.this[0].name
}
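
# Illustrative only: extra managed policies can be attached alongside the two
# required ones by passing full policy ARNs, e.g.:
#
#   iam_role_additional_policies = [
#     "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
#   ]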

# Using separate attachment due to `The "for_each" value depends on resource attributes that cannot be determined until apply`
resource "aws_iam_role_policy_attachment" "cluster_encryption" {
  count = local.create_iam_role && var.attach_cluster_encryption_policy && length(var.cluster_encryption_config) > 0 ? 1 : 0

  policy_arn = aws_iam_policy.cluster_encryption[0].arn
  role       = aws_iam_role.this[0].name
}

resource "aws_iam_policy" "cluster_encryption" {
  count = local.create_iam_role && var.attach_cluster_encryption_policy && length(var.cluster_encryption_config) > 0 ? 1 : 0

  name        = var.cluster_encryption_policy_use_name_prefix ? null : local.cluster_encryption_policy_name
  name_prefix = var.cluster_encryption_policy_use_name_prefix ? local.cluster_encryption_policy_name : null
  description = var.cluster_encryption_policy_description
  path        = var.cluster_encryption_policy_path

  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Action = [
          "kms:Encrypt",
          "kms:Decrypt",
          "kms:ListGrants",
          "kms:DescribeKey",
        ]
        Effect   = "Allow"
        Resource = var.create_kms_key ? [module.kms.key_arn] : [for config in var.cluster_encryption_config : config.provider_key_arn]
      },
    ]
  })

  tags = merge(var.tags, var.cluster_encryption_policy_tags)
}

################################################################################
# EKS Addons
################################################################################

resource "aws_eks_addon" "this" {
  for_each = { for k, v in var.cluster_addons : k => v if local.create }

  cluster_name = aws_eks_cluster.this[0].name

  addon_name               = try(each.value.name, each.key)
  addon_version            = lookup(each.value, "addon_version", null)
  resolve_conflicts        = lookup(each.value, "resolve_conflicts", null)
  service_account_role_arn = lookup(each.value, "service_account_role_arn", null)

  depends_on = [
    module.fargate_profile,
    module.eks_managed_node_group,
    module.self_managed_node_group,
  ]

  tags = var.tags
}
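
# Illustrative only: `cluster_addons` is a map keyed by addon name; each value
# may set the optional attributes read above. Version and role ARN are placeholders.
#
#   cluster_addons = {
#     coredns = {
#       resolve_conflicts = "OVERWRITE"
#     }
#     kube-proxy = {}
#     vpc-cni = {
#       addon_version            = "v1.12.0-eksbuild.1"
#       service_account_role_arn = "arn:aws:iam::111122223333:role/vpc-cni-irsa"
#     }
#   }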

################################################################################
# EKS Identity Provider
# Note - this is different from IRSA
################################################################################

resource "aws_eks_identity_provider_config" "this" {
  for_each = { for k, v in var.cluster_identity_providers : k => v if local.create }

  cluster_name = aws_eks_cluster.this[0].name

  oidc {
    client_id                     = each.value.client_id
    groups_claim                  = lookup(each.value, "groups_claim", null)
    groups_prefix                 = lookup(each.value, "groups_prefix", null)
    identity_provider_config_name = try(each.value.identity_provider_config_name, each.key)
    issuer_url                    = try(each.value.issuer_url, aws_eks_cluster.this[0].identity[0].oidc[0].issuer)
    required_claims               = lookup(each.value, "required_claims", null)
    username_claim                = lookup(each.value, "username_claim", null)
    username_prefix               = lookup(each.value, "username_prefix", null)
  }

  tags = var.tags
}
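
# Illustrative only: an external OIDC identity provider for user authentication
# could be supplied like this; the issuer URL and client ID are placeholders.
#
#   cluster_identity_providers = {
#     corporate_sso = {
#       client_id    = "0123456789abcdef"
#       issuer_url   = "https://idp.example.com"
#       groups_claim = "groups"
#     }
#   }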

################################################################################
# aws-auth configmap
################################################################################

locals {
  node_iam_role_arns_non_windows = distinct(
    compact(
      concat(
        [for group in module.eks_managed_node_group : group.iam_role_arn],
        [for group in module.self_managed_node_group : group.iam_role_arn if group.platform != "windows"],
        var.aws_auth_node_iam_role_arns_non_windows,
      )
    )
  )

  node_iam_role_arns_windows = distinct(
    compact(
      concat(
        [for group in module.self_managed_node_group : group.iam_role_arn if group.platform == "windows"],
        var.aws_auth_node_iam_role_arns_windows,
      )
    )
  )

  fargate_profile_pod_execution_role_arns = distinct(
    compact(
      concat(
        [for group in module.fargate_profile : group.fargate_profile_pod_execution_role_arn],
        var.aws_auth_fargate_profile_pod_execution_role_arns,
      )
    )
  )

  aws_auth_configmap_data = {
    mapRoles = yamlencode(concat(
      [for role_arn in local.node_iam_role_arns_non_windows : {
        rolearn  = role_arn
        username = "system:node:{{EC2PrivateDNSName}}"
        groups = [
          "system:bootstrappers",
          "system:nodes",
        ]
        }
      ],
      [for role_arn in local.node_iam_role_arns_windows : {
        rolearn  = role_arn
        username = "system:node:{{EC2PrivateDNSName}}"
        groups = [
          "eks:kube-proxy-windows",
          "system:bootstrappers",
          "system:nodes",
        ]
        }
      ],
      # Fargate profile
      [for role_arn in local.fargate_profile_pod_execution_role_arns : {
        rolearn  = role_arn
        username = "system:node:{{SessionName}}"
        groups = [
          "system:bootstrappers",
          "system:nodes",
          "system:node-proxier",
        ]
        }
      ],
      var.aws_auth_roles
    ))
    mapUsers    = yamlencode(var.aws_auth_users)
    mapAccounts = yamlencode(var.aws_auth_accounts)
  }
}
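
# Illustrative only: extra role mappings appended to `mapRoles` via
# `aws_auth_roles` follow the same shape as the generated entries above;
# the ARN below is a placeholder.
#
#   aws_auth_roles = [
#     {
#       rolearn  = "arn:aws:iam::111122223333:role/platform-admin"
#       username = "platform-admin"
#       groups   = ["system:masters"]
#     }
#   ]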
resource "kubernetes_config_map" "aws_auth" {
count = var.create && var.create_aws_auth_configmap ? 1 : 0
metadata {
name = "aws-auth"
namespace = "kube-system"
}
data = local.aws_auth_configmap_data
lifecycle {
# We are ignoring the data here since we will manage it with the resource below
# This is only intended to be used in scenarios where the configmap does not exist
ignore_changes = [data]
}
}
resource "kubernetes_config_map_v1_data" "aws_auth" {
count = var.create && var.manage_aws_auth_configmap ? 1 : 0
force = true
metadata {
name = "aws-auth"
namespace = "kube-system"
}
data = local.aws_auth_configmap_data
depends_on = [
# Required for instances where the configmap does not exist yet to avoid race condition
kubernetes_config_map.aws_auth,
]
}