diff --git a/modules/hetzner/README.md b/modules/hetzner/README.md
index 2f7abd3..5c7632a 100644
--- a/modules/hetzner/README.md
+++ b/modules/hetzner/README.md
@@ -61,6 +61,7 @@ No modules.
| [network\_subnet](#input\_network\_subnet) | Subnet of the main network | `string` | `"10.0.0.0/16"` | no |
| [network\_type](#input\_network\_type) | Type of network to use | `string` | `"cloud"` | no |
| [region](#input\_region) | Region to use. This covers multiple datacentres. | `string` | `"eu-central"` | no |
+| [schedule\_workloads\_on\_manager\_nodes](#input\_schedule\_workloads\_on\_manager\_nodes) | Allow scheduling of workloads on manager nodes. | `bool` | `true` | no |
| [ssh\_key](#input\_ssh\_key) | Path to the private SSH key | `string` | `"~/.ssh/id_ed25519"` | no |
| [ssh\_key\_public](#input\_ssh\_key\_public) | Path to the public SSH key | `string` | `"~/.ssh/id_ed25519.pub"` | no |
| [ssh\_port](#input\_ssh\_port) | Port to use for SSH access | `number` | `2244` | no |
diff --git a/modules/hetzner/k3s.tf b/modules/hetzner/k3s.tf
index f4894b5..159cf27 100644
--- a/modules/hetzner/k3s.tf
+++ b/modules/hetzner/k3s.tf
@@ -34,8 +34,18 @@ locals {
kube-proxy-arg = "metrics-bind-address=0.0.0.0"
kube-scheduler-arg = "bind-address=0.0.0.0"
node-label = [for l in var.k3s_manager_pool.labels : "${l.key}=${l.value}"]
- node-taint = [for t in var.k3s_manager_pool.taints : "${t.key}=${t.value}:${t.effect}"]
- service-cidr = var.k3s_service_cidr
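+ # Taint managers with CriticalAddonsOnly so only critical addons run there when workload scheduling is disabled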
+ node-taint = [for t in concat(
+ var.schedule_workloads_on_manager_nodes ? [] : [
+ {
+ key = "CriticalAddonsOnly"
+ value = "true"
+ effect = "NoExecute"
+ }
+ ],
+ var.k3s_manager_pool.taints
+ ) : "${t.key}=${t.value}:${t.effect}"]
+ service-cidr = var.k3s_service_cidr
tls-san = concat(
[local.k3s_access_address],
[for o in hcloud_server.manager : tolist(o.network)[0].ip]
diff --git a/modules/hetzner/variables.tf b/modules/hetzner/variables.tf
index 1235a56..f937b2b 100644
--- a/modules/hetzner/variables.tf
+++ b/modules/hetzner/variables.tf
@@ -172,6 +172,12 @@ variable "region" {
default = "eu-central"
}
+variable "schedule_workloads_on_manager_nodes" {
+ type = bool
+ description = "Allow scheduling of workloads on manager nodes."
+ default = true
+}
+
variable "ssh_key" {
type = string
description = "Path to the private SSH key"
diff --git a/modules/kubernetes/autoscaler.tf b/modules/kubernetes/autoscaler.tf
index ea7cba6..6f271fb 100644
--- a/modules/kubernetes/autoscaler.tf
+++ b/modules/kubernetes/autoscaler.tf
@@ -90,4 +90,27 @@ resource "helm_release" "cluster_autoscaler" {
name = "podAnnotations.secret"
value = sha512(yamlencode(kubernetes_secret_v1.cluster_autoscaler[count.index].data))
}
+
+ # Allow running on control plane nodes
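+ # Each taint map expands to indexed Helm set values, e.g. tolerations[0].key and tolerations[0].operator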
+ dynamic "set" {
+ for_each = flatten([
+ for i, taint in local.control_plane_taints :
+ [
+ for k, v in taint :
+ [
+ {
+ name = "tolerations[${i}].${k}"
+ value = v
+ },
+ ]
+ ]
+ ])
+ iterator = each
+
+ content {
+ name = each.value.name
+ value = each.value.value
+ }
+ }
}
diff --git a/modules/kubernetes/hetzner.tf b/modules/kubernetes/hetzner.tf
index 065ef5c..a39d1ec 100644
--- a/modules/kubernetes/hetzner.tf
+++ b/modules/kubernetes/hetzner.tf
@@ -75,4 +75,31 @@ resource "helm_release" "hcloud_csi" {
name = "controller.podAnnotations.secret"
value = sha512(yamlencode(kubernetes_secret_v1.hcloud.data))
}
+
+ # Allow running on control plane nodes
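+ # Both the CSI controller and the per-node pods need the tolerations to schedule on tainted managers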
+ dynamic "set" {
+ for_each = flatten([
+ for i, taint in local.control_plane_taints :
+ [
+ for k, v in taint :
+ [
+ {
+ name = "controller.tolerations[${i}].${k}"
+ value = v
+ },
+ {
+ name = "node.tolerations[${i}].${k}"
+ value = v
+ },
+ ]
+ ]
+ ])
+ iterator = each
+
+ content {
+ name = each.value.name
+ value = each.value.value
+ }
+ }
}
diff --git a/modules/kubernetes/locals.tf b/modules/kubernetes/locals.tf
index c09a472..58b7d70 100644
--- a/modules/kubernetes/locals.tf
+++ b/modules/kubernetes/locals.tf
@@ -13,6 +13,13 @@
# limitations under the License.
locals {
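+ # Tolerations matching the CriticalAddonsOnly taint the hetzner module applies to manager nodes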
+ control_plane_taints = [
+ {
+ key = "CriticalAddonsOnly"
+ operator = "Exists"
+ },
+ ]
kubeconfig = yamldecode(var.kubeconfig)
kubeconfig_clusters = { for context in local.kubeconfig.clusters : context.name => context.cluster }
kubeconfig_users = { for context in local.kubeconfig.users : context.name => context.user }