From e845357c392aa790cffb631dec6b6b5a93c74790 Mon Sep 17 00:00:00 2001 From: Adrian Pedriza Date: Mon, 11 Nov 2024 18:01:16 +0100 Subject: [PATCH] Add envtest for testing controllers Signed-off-by: Adrian Pedriza --- cmd/main.go | 1 - go.mod | 10 +- go.sum | 2 + internal/controller/controlplane/helper.go | 8 +- .../k0s_controlplane_controller.go | 66 +- .../k0s_controlplane_controller_test.go | 1329 ++++++++++++++++- .../controller/controlplane/suite_test.go | 37 + internal/controller/controlplane/util.go | 4 + internal/test/envtest/environment.go | 274 ++++ internal/test/envtest/generic_provider.go | 73 + 10 files changed, 1768 insertions(+), 36 deletions(-) create mode 100644 internal/controller/controlplane/suite_test.go create mode 100644 internal/test/envtest/environment.go create mode 100644 internal/test/envtest/generic_provider.go diff --git a/cmd/main.go b/cmd/main.go index 953785976..1be092a64 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -231,7 +231,6 @@ func main() { if err = (&controlplane.K0sController{ Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), ClientSet: clientSet, RESTConfig: restConfig, }).SetupWithManager(mgr); err != nil { diff --git a/go.mod b/go.mod index 2f93d9a68..19577f341 100644 --- a/go.mod +++ b/go.mod @@ -5,6 +5,7 @@ go 1.22.0 require ( github.com/cloudflare/cfssl v1.6.4 github.com/go-logr/logr v1.4.2 + github.com/gobuffalo/flect v1.0.2 github.com/google/uuid v1.6.0 github.com/imdario/mergo v0.3.16 github.com/k0sproject/k0s v1.27.2-0.20230504131248-94378e521a29 @@ -16,8 +17,11 @@ require ( github.com/stretchr/testify v1.9.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.30.3 + k8s.io/apiextensions-apiserver v0.30.3 k8s.io/apimachinery v0.30.3 k8s.io/client-go v0.30.3 + k8s.io/klog/v2 v2.120.1 + k8s.io/kubectl v0.30.3 k8s.io/kubernetes v1.30.3 k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 sigs.k8s.io/controller-runtime v0.18.5 @@ -57,7 +61,7 @@ require ( github.com/xeipuuv/gojsonschema v1.2.0 // indirect golang.org/x/crypto v0.27.0 golang.org/x/sync v0.8.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/tools v0.24.0 gotest.tools/v3 v3.4.0 // indirect helm.sh/helm/v3 v3.11.3 // indirect k8s.io/kube-aggregator v0.27.2 // indirect @@ -90,7 +94,6 @@ require ( github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.3 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect - github.com/gobuffalo/flect v1.0.2 // indirect github.com/gofrs/uuid v4.4.0+incompatible // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect @@ -152,6 +155,7 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect + golang.org/x/mod v0.20.0 // indirect golang.org/x/net v0.28.0 // indirect golang.org/x/oauth2 v0.21.0 // indirect golang.org/x/sys v0.25.0 // indirect @@ -166,14 +170,12 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - k8s.io/apiextensions-apiserver v0.30.3 // indirect k8s.io/apiserver v0.30.3 // indirect k8s.io/cloud-provider v0.27.1 // indirect k8s.io/cluster-bootstrap v0.30.3 // indirect k8s.io/component-base v0.30.3 // indirect k8s.io/component-helpers v0.30.3 // indirect k8s.io/controller-manager v0.30.3 // indirect - k8s.io/klog/v2 v2.120.1 // indirect k8s.io/kms v0.30.3 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect 
k8s.io/kubelet v0.27.1 // indirect diff --git a/go.sum b/go.sum index fe89767ef..ee1c316c1 100644 --- a/go.sum +++ b/go.sum @@ -521,6 +521,8 @@ k8s.io/kube-aggregator v0.30.3 h1:hy5zfQ7p6BuJgc/XtGp3GBh2MPfOj6b1n3raKKMHOQE= k8s.io/kube-aggregator v0.30.3/go.mod h1:2SP0IckvQoOwwZN8lmtWUnTZTgIpwOWvidWtxyqLwuk= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/kubectl v0.30.3 h1:YIBBvMdTW0xcDpmrOBzcpUVsn+zOgjMYIu7kAq+yqiI= +k8s.io/kubectl v0.30.3/go.mod h1:IcR0I9RN2+zzTRUa1BzZCm4oM0NLOawE6RzlDvd1Fpo= k8s.io/kubelet v0.30.3 h1:KvGWDdhzD0vEyDyGTCjsDc8D+0+lwRMw3fJbfQgF7ys= k8s.io/kubelet v0.30.3/go.mod h1:D9or45Vkzcqg55CEiqZ8dVbwP3Ksj7DruEVRS9oq3Ys= k8s.io/kubernetes v1.30.3 h1:A0qoXI1YQNzrQZiff33y5zWxYHFT/HeZRK98/sRDJI0= diff --git a/internal/controller/controlplane/helper.go b/internal/controller/controlplane/helper.go index f41313a97..97c259556 100644 --- a/internal/controller/controlplane/helper.go +++ b/internal/controller/controlplane/helper.go @@ -50,7 +50,7 @@ func (c *K0sController) createMachine(ctx context.Context, name string, cluster if err != nil { return nil, fmt.Errorf("error generating machine: %w", err) } - _ = ctrl.SetControllerReference(kcp, machine, c.Scheme) + _ = ctrl.SetControllerReference(kcp, machine, c.Client.Scheme()) err = c.Client.Patch(ctx, machine, client.Apply, &client.PatchOptions{ FieldManager: "k0smotron", @@ -226,7 +226,11 @@ func (c *K0sController) generateMachineFromTemplate(ctx context.Context, name st return nil, err } - _ = ctrl.SetControllerReference(kcp, infraMachineTemplate, c.Scheme) + _ = ctrl.SetControllerReference(cluster, infraMachineTemplate, c.Client.Scheme()) + err = c.Client.Patch(ctx, infraMachineTemplate, client.Merge, &client.PatchOptions{FieldManager: "k0smotron"}) + if err != nil { + return nil, err + } template, found, err := unstructured.NestedMap(infraMachineTemplate.UnstructuredContent(), "spec", "template") if !found { diff --git a/internal/controller/controlplane/k0s_controlplane_controller.go b/internal/controller/controlplane/k0s_controlplane_controller.go index 22d7ea4d6..f8f462878 100644 --- a/internal/controller/controlplane/k0s_controlplane_controller.go +++ b/internal/controller/controlplane/k0s_controlplane_controller.go @@ -36,7 +36,6 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" @@ -48,7 +47,6 @@ import ( "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/certs" "sigs.k8s.io/cluster-api/util/collections" - "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/failuredomains" "sigs.k8s.io/cluster-api/util/kubeconfig" "sigs.k8s.io/cluster-api/util/secret" @@ -67,15 +65,20 @@ const ( ) var ( - ErrNotReady = fmt.Errorf("waiting for the state") - ErrNewMachinesNotReady = fmt.Errorf("waiting for new machines: %w", ErrNotReady) + ErrNotReady = fmt.Errorf("waiting for the state") + ErrNewMachinesNotReady = fmt.Errorf("waiting for new machines: %w", ErrNotReady) + FRPTokenNameTemplate = "%s-frp-token" + FRPConfigMapNameTemplate = "%s-frps-config" + FRPDeploymentNameTemplate = "%s-frps" + FRPServiceNameTemplate = "%s-frps" ) type K0sController struct { client.Client - Scheme 
*runtime.Scheme ClientSet *kubernetes.Clientset RESTConfig *rest.Config + // workloadClusterKubeClient is used during testing to inject a fake client + workloadClusterKubeClient *kubernetes.Clientset } // +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=k0scontrolplanes/status,verbs=get;list;watch;create;update;patch;delete @@ -128,6 +131,11 @@ func (c *K0sController) Reconcile(ctx context.Context, req ctrl.Request) (res ct return ctrl.Result{}, nil } + if annotations.IsPaused(cluster, kcp) { + log.Info("Reconciliation is paused for this object or owning cluster") + return ctrl.Result{}, nil + } + // Always patch the object to update the status defer func() { log.Info("Updating status") @@ -171,13 +179,6 @@ func (c *K0sController) Reconcile(ctx context.Context, req ctrl.Request) (res ct log = log.WithValues("cluster", cluster.Name) - // TODO: Use paused.EnsurePausedCondition from "sigs.k8s.io/cluster-api/util/paused" when upgrading to v1.9.0. - if annotations.IsPaused(cluster, kcp) { - log.Info("Reconciliation is paused for this object or owning cluster") - conditions.MarkTrue(kcp, cpv1beta1.ControlPlanePausedCondition) - return ctrl.Result{}, nil - } - if err := c.ensureCertificates(ctx, cluster, kcp); err != nil { log.Error(err, "Failed to ensure certificates") return ctrl.Result{}, err @@ -349,7 +350,7 @@ func (c *K0sController) reconcileMachines(ctx context.Context, cluster *clusterv log.Log.Info("Got current cluster version", "version", currentVersion) machineNamesToDelete := make(map[string]bool) - desiredMachineNames := make(map[string]bool) + desiredMachineNamesSlice := []string{} var clusterIsUpdating bool var clusterIsMutating bool @@ -358,19 +359,28 @@ func (c *K0sController) reconcileMachines(ctx context.Context, cluster *clusterv clusterIsUpdating = true clusterIsMutating = true if kcp.Spec.UpdateStrategy == cpv1beta1.UpdateInPlace { - desiredMachineNames[m.Name] = true + desiredMachineNamesSlice = append(desiredMachineNamesSlice, m.Name) } else { machineNamesToDelete[m.Name] = true } } else if !matchesTemplateClonedFrom(infraMachines, kcp, m) { clusterIsMutating = true machineNamesToDelete[m.Name] = true - } else if machines.Len() > int(kcp.Spec.Replicas)+len(machineNamesToDelete) { - machineNamesToDelete[m.Name] = true } else { - desiredMachineNames[m.Name] = true + desiredMachineNamesSlice = append(desiredMachineNamesSlice, m.Name) } } + + desiredMachineNames := make(map[string]bool) + for i := range desiredMachineNamesSlice { + desiredMachineNames[desiredMachineNamesSlice[i]] = true + } + + // if it is necessary to reduce the number of replicas even counting the replicas to be eliminated + // because they are outdated, we choose the oldest among the valid ones. 
+ if machines.Len() > int(kcp.Spec.Replicas)+len(machineNamesToDelete) && len(desiredMachineNamesSlice) > 0 { + machineNamesToDelete[desiredMachineNamesSlice[0]] = true + } log.Log.Info("Collected machines", "count", machines.Len(), "desired", kcp.Spec.Replicas, "updating", clusterIsUpdating, "deleting", len(machineNamesToDelete), "desiredMachines", desiredMachineNames) if clusterIsUpdating { @@ -399,10 +409,10 @@ func (c *K0sController) reconcileMachines(ctx context.Context, cluster *clusterv if len(machineNamesToDelete)+len(desiredMachineNames) > int(kcp.Spec.Replicas) { - m := machines.Newest().Name - err := c.checkMachineIsReady(ctx, m, cluster) + newestMachine := machines.Newest().Name + err := c.checkMachineIsReady(ctx, newestMachine, cluster) if err != nil { - logger.Error(err, "Error checking machine left", "machine", m) + logger.Error(err, "Error checking machine left", "machine", newestMachine) return err } @@ -689,7 +699,7 @@ token = ` + frpToken + ` ` } - frpsCMName := kcp.GetName() + "-frps-config" + frpsCMName := fmt.Sprintf(FRPConfigMapNameTemplate, kcp.GetName()) cm := corev1.ConfigMap{ TypeMeta: metav1.TypeMeta{ Kind: "ConfigMap", @@ -704,7 +714,7 @@ token = ` + frpToken + ` }, } - _ = ctrl.SetControllerReference(kcp, &cm, c.Scheme) + _ = ctrl.SetControllerReference(kcp, &cm, c.Client.Scheme()) err = c.Client.Patch(ctx, &cm, client.Apply, &client.PatchOptions{FieldManager: "k0s-bootstrap"}) if err != nil { return fmt.Errorf("error creating ConfigMap: %w", err) @@ -716,7 +726,7 @@ token = ` + frpToken + ` Kind: "Deployment", }, ObjectMeta: metav1.ObjectMeta{ - Name: kcp.GetName() + "-frps", + Name: fmt.Sprintf(FRPDeploymentNameTemplate, kcp.GetName()), Namespace: kcp.GetNamespace(), }, Spec: appsv1.DeploymentSpec{ @@ -773,7 +783,7 @@ token = ` + frpToken + ` }}, }, } - _ = ctrl.SetControllerReference(kcp, &frpsDeployment, c.Scheme) + _ = ctrl.SetControllerReference(kcp, &frpsDeployment, c.Client.Scheme()) err = c.Client.Patch(ctx, &frpsDeployment, client.Apply, &client.PatchOptions{FieldManager: "k0s-bootstrap"}) if err != nil { return fmt.Errorf("error creating Deployment: %w", err) @@ -785,7 +795,7 @@ token = ` + frpToken + ` Kind: "Service", }, ObjectMeta: metav1.ObjectMeta{ - Name: kcp.GetName() + "-frps", + Name: fmt.Sprintf(FRPServiceNameTemplate, kcp.GetName()), Namespace: kcp.GetNamespace(), }, Spec: corev1.ServiceSpec{ @@ -809,7 +819,7 @@ token = ` + frpToken + ` Type: corev1.ServiceTypeNodePort, }, } - _ = ctrl.SetControllerReference(kcp, &frpsService, c.Scheme) + _ = ctrl.SetControllerReference(kcp, &frpsService, c.Client.Scheme()) err = c.Client.Patch(ctx, &frpsService, client.Apply, &client.PatchOptions{FieldManager: "k0s-bootstrap"}) if err != nil { return fmt.Errorf("error creating Service: %w", err) @@ -828,7 +838,7 @@ func (c *K0sController) detectNodeIP(ctx context.Context, _ *cpv1beta1.K0sContro } func (c *K0sController) createFRPToken(ctx context.Context, cluster *clusterv1.Cluster, kcp *cpv1beta1.K0sControlPlane) (string, error) { - secretName := cluster.Name + "-frp-token" + secretName := fmt.Sprintf(FRPTokenNameTemplate, cluster.Name) var existingSecret corev1.Secret err := c.Client.Get(ctx, client.ObjectKey{Name: secretName, Namespace: cluster.Namespace}, &existingSecret) @@ -857,7 +867,7 @@ func (c *K0sController) createFRPToken(ctx context.Context, cluster *clusterv1.C Type: clusterv1.ClusterSecretType, } - _ = ctrl.SetControllerReference(kcp, frpSecret, c.Scheme) + _ = ctrl.SetControllerReference(kcp, frpSecret, c.Client.Scheme()) return frpToken, 
c.Client.Patch(ctx, frpSecret, client.Apply, &client.PatchOptions{ FieldManager: "k0smotron", diff --git a/internal/controller/controlplane/k0s_controlplane_controller_test.go b/internal/controller/controlplane/k0s_controlplane_controller_test.go index c1348e352..41ccb4225 100644 --- a/internal/controller/controlplane/k0s_controlplane_controller_test.go +++ b/internal/controller/controlplane/k0s_controlplane_controller_test.go @@ -17,15 +17,45 @@ limitations under the License. package controlplane import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "strings" "testing" + "time" + . "github.com/onsi/gomega" "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + restfake "k8s.io/client-go/rest/fake" + "k8s.io/client-go/tools/clientcmd/api" + clientcmdlatest "k8s.io/client-go/tools/clientcmd/api/latest" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + kubeadmConfig "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + "sigs.k8s.io/cluster-api/controllers/external" + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/collections" + "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/kubeconfig" + "sigs.k8s.io/cluster-api/util/secret" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + autopilot "github.com/k0sproject/k0s/pkg/apis/autopilot/v1beta2" bootstrapv1 "github.com/k0sproject/k0smotron/api/bootstrap/v1beta1" "github.com/k0sproject/k0smotron/api/controlplane/v1beta1" + cpv1beta1 "github.com/k0sproject/k0smotron/api/controlplane/v1beta1" ) func TestK0sConfigEnrichment(t *testing.T) { @@ -212,3 +242,1300 @@ func Test_machineName(t *testing.T) { }) } } +func TestReconcileReturnErrorWhenOwnerClusterIsMissing(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-return-error-cluster-owner-missing") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, gmt := createClusterWithControlPlane(ns.Name) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + g.Expect(testEnv.Create(ctx, gmt)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, gmt, cluster, ns) + + r := &K0sController{ + Client: testEnv, + } + + result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(result).To(BeComparableTo(ctrl.Result{RequeueAfter: 20 * time.Second, Requeue: true})) + + g.Expect(testEnv.CleanupAndWait(ctx, cluster)).To(Succeed()) + + g.Eventually(func() error { + _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) + return err + }, 10*time.Second).Should(HaveOccurred()) +} + +func TestReconcileNoK0sControlPlane(t *testing.T) { + g := NewWithT(t) + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-no-control-plane") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, _ := createClusterWithControlPlane(ns.Name) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(cluster, ns) + + r := &K0sController{ + 
Client: testEnv, + } + + result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(result).To(BeComparableTo(ctrl.Result{})) +} + +func TestReconcilePausedCluster(t *testing.T) { + g := NewWithT(t) + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-paused-cluster") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, _ := createClusterWithControlPlane(ns.Name) + + // Cluster 'paused'. + cluster.Spec.Paused = true + + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, cluster, ns) + + r := &K0sController{ + Client: testEnv, + } + + result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(result).To(BeComparableTo(ctrl.Result{})) +} + +func TestReconcilePausedK0sControlPlane(t *testing.T) { + g := NewWithT(t) + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-paused-k0scontrolplane") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, _ := createClusterWithControlPlane(ns.Name) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + + // K0sControlPlane with 'paused' annotation. + kcp.Annotations = map[string]string{"cluster.x-k8s.io/paused": "true"} + + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, cluster, ns) + + r := &K0sController{ + Client: testEnv, + } + + result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(result).To(BeComparableTo(ctrl.Result{})) +} + +func TestReconcileTunneling(t *testing.T) { + g := NewWithT(t) + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-tunneling") + g.Expect(err).ToNot(HaveOccurred()) + + node := createNode() + g.Expect(testEnv.Create(ctx, node)).To(Succeed()) + + cluster, kcp, _ := createClusterWithControlPlane(ns.Name) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + + kcp.Spec.K0sConfigSpec = bootstrapv1.K0sConfigSpec{ + Tunneling: bootstrapv1.TunnelingSpec{ + Enabled: true, + }, + } + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, cluster, ns) + + clientSet, err := kubernetes.NewForConfig(testEnv.Config) + g.Expect(err).ToNot(HaveOccurred()) + + r := &K0sController{ + Client: testEnv, + ClientSet: clientSet, + } + err = r.reconcileTunneling(ctx, cluster, kcp) + g.Expect(err).ToNot(HaveOccurred()) + + frpToken, err := clientSet.CoreV1().Secrets(ns.Name).Get(ctx, fmt.Sprintf(FRPTokenNameTemplate, cluster.Name), metav1.GetOptions{}) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(metav1.IsControlledBy(frpToken, kcp)).To(BeTrue()) + + frpCM, err := clientSet.CoreV1().ConfigMaps(ns.Name).Get(ctx, fmt.Sprintf(FRPConfigMapNameTemplate, kcp.GetName()), metav1.GetOptions{}) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(metav1.IsControlledBy(frpCM, kcp)).To(BeTrue()) + + frpDeploy, err := clientSet.AppsV1().Deployments(ns.Name).Get(ctx, fmt.Sprintf(FRPDeploymentNameTemplate, kcp.GetName()), metav1.GetOptions{}) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(metav1.IsControlledBy(frpDeploy, kcp)).To(BeTrue()) + + frpService, err := clientSet.CoreV1().Services(ns.Name).Get(ctx, fmt.Sprintf(FRPServiceNameTemplate, kcp.GetName()), 
metav1.GetOptions{}) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(metav1.IsControlledBy(frpService, kcp)).To(BeTrue()) +} + +func TestReconcileKubeconfigEmptyAPIEndpoints(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-kubeconfig-empty-api-endpoints") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, _ := createClusterWithControlPlane(ns.Name) + + // Host and Port with zero values. + cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{} + + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, cluster, ns) + + r := &K0sController{ + Client: testEnv, + } + err = r.reconcileKubeconfig(ctx, cluster, kcp) + g.Expect(err).To(HaveOccurred()) + + kubeconfigSecret := &corev1.Secret{} + secretKey := client.ObjectKey{ + Namespace: cluster.Namespace, + Name: secret.Name(cluster.Name, secret.Kubeconfig), + } + g.Expect(testEnv.GetAPIReader().Get(ctx, secretKey, kubeconfigSecret)).To(MatchError(ContainSubstring("not found"))) +} + +func TestReconcileKubeconfigMissingCACertificate(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-kubeconfig-missing-ca-certificates") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, _ := createClusterWithControlPlane(ns.Name) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, cluster, ns) + + r := &K0sController{ + Client: testEnv, + } + + err = r.reconcileKubeconfig(ctx, cluster, kcp) + g.Expect(err).To(HaveOccurred()) + + kubeconfigSecret := &corev1.Secret{} + secretKey := client.ObjectKey{ + Namespace: cluster.Namespace, + Name: secret.Name(cluster.Name, secret.Kubeconfig), + } + g.Expect(testEnv.GetAPIReader().Get(ctx, secretKey, kubeconfigSecret)).To(MatchError(ContainSubstring("not found"))) +} + +func TestReconcileKubeconfigTunnelingModeNotEnabled(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-kubeconfig-tunneling-mode-not-enabled") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, _ := createClusterWithControlPlane(ns.Name) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + + // Tunneling not enabled. 
+ kcp.Spec.K0sConfigSpec.Tunneling.Enabled = false + + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, cluster, ns) + + kubeconfigSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secret.Name(cluster.Name, secret.Kubeconfig), + Namespace: cluster.Namespace, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + }, + OwnerReferences: []metav1.OwnerReference{}, + }, + Data: map[string][]byte{ + secret.KubeconfigDataName: {}, + }, + } + g.Expect(testEnv.Create(ctx, kubeconfigSecret)).To(Succeed()) + + r := &K0sController{ + Client: testEnv, + } + + err = r.reconcileKubeconfig(ctx, cluster, kcp) + g.Expect(err).ToNot(HaveOccurred()) +} + +func TestReconcileKubeconfigTunnelingModeProxy(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-kubeconfig-tunneling-mode-proxy") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, _ := createClusterWithControlPlane(ns.Name) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + + // Tunneling mode = 'proxy' + kcp.Spec.K0sConfigSpec.Tunneling = bootstrapv1.TunnelingSpec{ + Enabled: true, + Mode: "proxy", + ServerAddress: "test.com", + TunnelingNodePort: 9999, + } + + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, cluster, ns) + + kubeconfigSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secret.Name(cluster.Name, secret.Kubeconfig), + Namespace: cluster.Namespace, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + }, + OwnerReferences: []metav1.OwnerReference{}, + }, + Data: map[string][]byte{ + secret.KubeconfigDataName: {}, + }, + } + g.Expect(testEnv.Create(ctx, kubeconfigSecret)).To(Succeed()) + + clusterCerts := secret.NewCertificatesForInitialControlPlane(&kubeadmConfig.ClusterConfiguration{}) + g.Expect(clusterCerts.Generate()).To(Succeed()) + caCert := clusterCerts.GetByPurpose(secret.ClusterCA) + caCertSecret := caCert.AsSecret( + client.ObjectKey{Namespace: cluster.Namespace, Name: cluster.Name}, + *metav1.NewControllerRef(kcp, cpv1beta1.GroupVersion.WithKind("K0sControlPlane")), + ) + g.Expect(testEnv.Create(ctx, caCertSecret)).To(Succeed()) + + r := &K0sController{ + Client: testEnv, + } + + err = r.reconcileKubeconfig(ctx, cluster, kcp) + g.Expect(err).To(HaveOccurred()) + + secretKey := client.ObjectKey{ + Namespace: cluster.Namespace, + Name: secret.Name(cluster.Name+"-proxied", secret.Kubeconfig), + } + + kubeconfigProxiedSecret := &corev1.Secret{} + g.Expect(testEnv.Get(ctx, secretKey, kubeconfigProxiedSecret)).To(Succeed()) + + kubeconfigProxiedSecretCrt, _ := runtime.Decode(clientcmdlatest.Codec, kubeconfigProxiedSecret.Data["value"]) + for _, v := range kubeconfigProxiedSecretCrt.(*api.Config).Clusters { + g.Expect(v.Server).To(Equal("https://test.endpoint:6443")) + g.Expect(v.ProxyURL).To(Equal("http://test.com:9999")) + } +} + +func TestReconcileKubeconfigTunnelingModeTunnel(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-kubeconfig-tunneling-mode-tunnel") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, _ := createClusterWithControlPlane(ns.Name) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + + // Tunneling mode = 'tunnel' + kcp.Spec.K0sConfigSpec.Tunneling = bootstrapv1.TunnelingSpec{ + Enabled: true, + Mode: "tunnel", + ServerAddress: 
"test.com", + TunnelingNodePort: 9999, + } + + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, cluster, ns) + + kubeconfigSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secret.Name(cluster.Name, secret.Kubeconfig), + Namespace: cluster.Namespace, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + }, + OwnerReferences: []metav1.OwnerReference{}, + }, + Data: map[string][]byte{ + secret.KubeconfigDataName: {}, + }, + } + g.Expect(testEnv.Create(ctx, kubeconfigSecret)).To(Succeed()) + + clusterCerts := secret.NewCertificatesForInitialControlPlane(&kubeadmConfig.ClusterConfiguration{}) + g.Expect(clusterCerts.Generate()).To(Succeed()) + caCert := clusterCerts.GetByPurpose(secret.ClusterCA) + caCertSecret := caCert.AsSecret( + client.ObjectKey{Namespace: cluster.Namespace, Name: cluster.Name}, + *metav1.NewControllerRef(kcp, cpv1beta1.GroupVersion.WithKind("K0sControlPlane")), + ) + g.Expect(testEnv.Create(ctx, caCertSecret)).To(Succeed()) + + r := &K0sController{ + Client: testEnv, + } + + err = r.reconcileKubeconfig(ctx, cluster, kcp) + g.Expect(err).To(HaveOccurred()) + + secretKey := client.ObjectKey{ + Namespace: cluster.Namespace, + Name: secret.Name(cluster.Name+"-tunneled", secret.Kubeconfig), + } + kubeconfigProxiedSecret := &corev1.Secret{} + g.Expect(testEnv.Get(ctx, secretKey, kubeconfigProxiedSecret)).ToNot(HaveOccurred()) + + kubeconfigProxiedSecretCrt, _ := runtime.Decode(clientcmdlatest.Codec, kubeconfigProxiedSecret.Data["value"]) + for _, v := range kubeconfigProxiedSecretCrt.(*api.Config).Clusters { + g.Expect(v.Server).To(Equal("https://test.com:9999")) + } +} + +func TestReconcileK0sConfigNotProvided(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-config-k0sconfig-not-provided") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, _ := createClusterWithControlPlane(ns.Name) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + + kcp.Spec.K0sConfigSpec.K0s = nil + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, cluster, ns) + + r := &K0sController{ + Client: testEnv, + } + err = r.reconcileConfig(ctx, cluster, kcp) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(kcp.Spec.K0sConfigSpec.K0s).To(BeNil()) +} + +func TestReconcileK0sConfigWithNLLBEnabled(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-config-nllb-enabled") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, _ := createClusterWithControlPlane(ns.Name) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + + // Enable '.spec.network.nodeLocalLoadBalancing' + kcp.Spec.K0sConfigSpec = bootstrapv1.K0sConfigSpec{ + K0s: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "k0s.k0sproject.io/v1beta1", + "kind": "ClusterConfig", + "spec": map[string]interface{}{ + "api": map[string]interface{}{ + "sans": []interface{}{ + "test.com", + }, + }, + "network": map[string]interface{}{ + "nodeLocalLoadBalancing": map[string]interface{}{ + "enabled": true, + }, + }, + }, + }, + }, + } + + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, cluster, ns) + + r := &K0sController{ + Client: testEnv, + } + err = r.reconcileConfig(ctx, cluster, kcp) + 
g.Expect(err).ToNot(HaveOccurred()) + + expectedk0sConfig := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "k0s.k0sproject.io/v1beta1", + "kind": "ClusterConfig", + "spec": map[string]interface{}{ + "api": map[string]interface{}{ + "sans": []interface{}{ + "test.endpoint", + "test.com", + }, + }, + "network": map[string]interface{}{ + "nodeLocalLoadBalancing": map[string]interface{}{ + "enabled": true, + }, + }, + }, + }, + } + g.Expect(kcp.Spec.K0sConfigSpec.K0s).To(Equal(expectedk0sConfig)) +} + +func TestReconcileK0sConfigWithNLLBDisabled(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-config-nllb-disabled") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, _ := createClusterWithControlPlane(ns.Name) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + + // Disable '.spec.network.nodeLocalLoadBalancing' + kcp.Spec.K0sConfigSpec = bootstrapv1.K0sConfigSpec{ + K0s: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "k0s.k0sproject.io/v1beta1", + "kind": "ClusterConfig", + "spec": map[string]interface{}{ + "api": map[string]interface{}{ + "sans": []interface{}{ + "test.com", + }, + }, + }, + }, + }, + } + + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, cluster, ns) + + r := &K0sController{ + Client: testEnv, + } + err = r.reconcileConfig(ctx, cluster, kcp) + g.Expect(err).ToNot(HaveOccurred()) + + expectedk0sConfig := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "k0s.k0sproject.io/v1beta1", + "kind": "ClusterConfig", + "spec": map[string]interface{}{ + "api": map[string]interface{}{ + "sans": []interface{}{ + "test.com", + }, + "externalAddress": "test.endpoint", + }, + }, + }, + } + g.Expect(kcp.Spec.K0sConfigSpec.K0s).To(Equal(expectedk0sConfig)) +} + +func TestReconcileK0sConfigTunnelingServerAddressToApiSans(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-config-tunneling-serveraddress-to-api-sans") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, _ := createClusterWithControlPlane(ns.Name) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + + // With '.spec.k0sConfigSpec.Tunneling.ServerAddress' + kcp.Spec.K0sConfigSpec = bootstrapv1.K0sConfigSpec{ + K0s: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "k0s.k0sproject.io/v1beta1", + "kind": "ClusterConfig", + "spec": map[string]interface{}{ + "api": map[string]interface{}{ + "sans": []interface{}{ + "test.com", + }, + }, + }, + }, + }, + Tunneling: bootstrapv1.TunnelingSpec{ + ServerAddress: "my-tunneling-server-address.com", + }, + } + + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, cluster, ns) + + r := &K0sController{ + Client: testEnv, + } + err = r.reconcileConfig(ctx, cluster, kcp) + g.Expect(err).ToNot(HaveOccurred()) + + expectedk0sConfig := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "k0s.k0sproject.io/v1beta1", + "kind": "ClusterConfig", + "spec": map[string]interface{}{ + "api": map[string]interface{}{ + "sans": []interface{}{ + "test.com", + "my-tunneling-server-address.com", + }, + "externalAddress": "test.endpoint", + }, + }, + }, + } + g.Expect(kcp.Spec.K0sConfigSpec.K0s).To(Equal(expectedk0sConfig)) +} + +func TestReconcileMachinesScaleUp(t 
*testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-machine-scale-up") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, gmt := createClusterWithControlPlane(ns.Name) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + g.Expect(testEnv.Create(ctx, gmt)).To(Succeed()) + + desiredReplicas := 5 + kcp.Spec.Replicas = int32(desiredReplicas) + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, gmt, cluster, ns) + + kcpOwnerRef := *metav1.NewControllerRef(kcp, cpv1beta1.GroupVersion.WithKind("K0sControlPlane")) + + r := &K0sController{ + Client: testEnv, + } + + firstMachineRelatedToControlPlane := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d", kcp.Name, 0), + Namespace: ns.Name, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.MachineControlPlaneLabel: "true", + clusterv1.MachineControlPlaneNameLabel: kcp.GetName(), + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: cluster.Name, + Version: ptr.To("v1.30.0"), + InfrastructureRef: v1.ObjectReference{ + Kind: "GenericInfrastructureMachineTemplate", + Namespace: ns.Name, + Name: gmt.GetName(), + APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", + }, + }, + } + firstMachineRelatedToControlPlane.SetOwnerReferences([]metav1.OwnerReference{kcpOwnerRef}) + g.Expect(testEnv.Create(ctx, firstMachineRelatedToControlPlane)).To(Succeed()) + + secondMachineRelatedToControlPlane := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d", kcp.Name, 1), + Namespace: ns.Name, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.MachineControlPlaneLabel: "true", + clusterv1.MachineControlPlaneNameLabel: kcp.GetName(), + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: cluster.Name, + Version: ptr.To("v1.30.0"), + InfrastructureRef: v1.ObjectReference{ + Kind: "GenericInfrastructureMachineTemplate", + Namespace: ns.Name, + Name: gmt.GetName(), + APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", + }, + }, + } + secondMachineRelatedToControlPlane.SetOwnerReferences([]metav1.OwnerReference{kcpOwnerRef}) + g.Expect(testEnv.Create(ctx, secondMachineRelatedToControlPlane)).To(Succeed()) + + machineNotRelatedToControlPlane := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "external-machine", + Namespace: ns.Name, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: cluster.Name, + Version: ptr.To("v1.30.0"), + }, + } + g.Expect(testEnv.Create(ctx, machineNotRelatedToControlPlane)).To(Succeed()) + + g.Eventually(func(g Gomega) { + err = r.reconcileMachines(ctx, cluster, kcp) + g.Expect(err).ToNot(HaveOccurred()) + }, 5*time.Second).Should(Succeed()) + + machines, err := collections.GetFilteredMachinesForCluster(ctx, testEnv, cluster, collections.ControlPlaneMachines(cluster.Name), collections.ActiveMachines) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(machines).To(HaveLen(desiredReplicas)) + for _, m := range machines { + expectedLabels := map[string]string{ + clusterv1.ClusterNameLabel: cluster.GetName(), + clusterv1.MachineControlPlaneLabel: "true", + clusterv1.MachineControlPlaneNameLabel: kcp.GetName(), + } + g.Expect(m.Labels).Should(Equal(expectedLabels)) + g.Expect(metav1.IsControlledBy(m, kcp)).To(BeTrue()) + g.Expect(*m.Spec.Version).Should(Equal(kcp.Spec.Version)) + } +} + 
+func TestReconcileMachinesScaleDown(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-machines-scale-down") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, gmt := createClusterWithControlPlane(ns.Name) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + g.Expect(testEnv.Create(ctx, gmt)).To(Succeed()) + + desiredReplicas := 1 + kcp.Spec.Replicas = int32(desiredReplicas) + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, gmt, cluster, ns) + + kcpOwnerRef := *metav1.NewControllerRef(kcp, cpv1beta1.GroupVersion.WithKind("K0sControlPlane")) + + fakeClient := &restfake.RESTClient{ + Client: restfake.CreateHTTPClient(roundTripperForWorkloadClusterAPI), + } + + restClient, _ := rest.RESTClientFor(&rest.Config{ + ContentConfig: rest.ContentConfig{ + NegotiatedSerializer: scheme.Codecs, + GroupVersion: &metav1.SchemeGroupVersion, + }, + }) + restClient.Client = fakeClient.Client + + r := &K0sController{ + Client: testEnv, + workloadClusterKubeClient: kubernetes.New(restClient), + } + + firstMachineRelatedToControlPlane := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d", kcp.Name, 0), + Namespace: ns.Name, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.MachineControlPlaneLabel: "true", + clusterv1.MachineControlPlaneNameLabel: kcp.GetName(), + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: cluster.Name, + Version: ptr.To("v1.30.0"), + InfrastructureRef: v1.ObjectReference{ + Kind: "GenericInfrastructureMachineTemplate", + Namespace: ns.Name, + Name: gmt.GetName(), + APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", + }, + }, + } + firstMachineRelatedToControlPlane.SetOwnerReferences([]metav1.OwnerReference{kcpOwnerRef}) + g.Expect(testEnv.Create(ctx, firstMachineRelatedToControlPlane)).To(Succeed()) + + secondMachineRelatedToControlPlane := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d", kcp.Name, 1), + Namespace: ns.Name, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.MachineControlPlaneLabel: "true", + clusterv1.MachineControlPlaneNameLabel: kcp.GetName(), + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: cluster.Name, + Version: ptr.To("v1.30.0"), + InfrastructureRef: v1.ObjectReference{ + Kind: "GenericInfrastructureMachineTemplate", + Namespace: ns.Name, + Name: gmt.GetName(), + APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", + }, + }, + } + secondMachineRelatedToControlPlane.SetOwnerReferences([]metav1.OwnerReference{kcpOwnerRef}) + g.Expect(testEnv.Create(ctx, secondMachineRelatedToControlPlane)).To(Succeed()) + + thirdMachineRelatedToControlPlane := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d", kcp.Name, 2), + Namespace: ns.Name, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.MachineControlPlaneLabel: "true", + clusterv1.MachineControlPlaneNameLabel: kcp.GetName(), + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: cluster.Name, + Version: ptr.To("v1.30.0"), + InfrastructureRef: v1.ObjectReference{ + Kind: "GenericInfrastructureMachineTemplate", + Namespace: ns.Name, + Name: gmt.GetName(), + APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", + }, + }, + } + thirdMachineRelatedToControlPlane.SetOwnerReferences([]metav1.OwnerReference{kcpOwnerRef}) + g.Expect(testEnv.Create(ctx, 
thirdMachineRelatedToControlPlane)).To(Succeed()) + + machineNotRelatedToControlPlane := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "external-machine", + Namespace: ns.Name, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: cluster.Name, + Version: ptr.To("v1.30.0"), + }, + } + g.Expect(testEnv.Create(ctx, machineNotRelatedToControlPlane)).To(Succeed()) + + g.Eventually(func(g Gomega) { + err = r.reconcileMachines(ctx, cluster, kcp) + g.Expect(err).ToNot(HaveOccurred()) + + machines, err := collections.GetFilteredMachinesForCluster(ctx, testEnv, cluster, collections.ControlPlaneMachines(cluster.Name), collections.ActiveMachines) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(machines).To(HaveLen(desiredReplicas)) + + k0sBootstrapConfigList := &bootstrapv1.K0sControllerConfigList{} + g.Expect(testEnv.GetAPIReader().List(ctx, k0sBootstrapConfigList, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(k0sBootstrapConfigList.Items).To(HaveLen(desiredReplicas)) + + for _, m := range machines { + expectedLabels := map[string]string{ + clusterv1.ClusterNameLabel: cluster.GetName(), + clusterv1.MachineControlPlaneLabel: "true", + clusterv1.MachineControlPlaneNameLabel: kcp.GetName(), + } + g.Expect(m.Labels).Should(Equal(expectedLabels)) + g.Expect(metav1.IsControlledBy(m, kcp)).To(BeTrue()) + g.Expect(*m.Spec.Version).Should(Equal(kcp.Spec.Version)) + + // verify that the bootrap config related to the existing machines is present. + bootstrapObjectKey := client.ObjectKey{ + Namespace: m.Namespace, + Name: m.Name, + } + kc := &bootstrapv1.K0sControllerConfig{} + g.Expect(testEnv.GetAPIReader().Get(ctx, bootstrapObjectKey, kc)).ToNot(HaveOccurred()) + g.Expect(metav1.IsControlledBy(kc, m)).To(BeTrue()) + } + }, 10*time.Second).Should(HaveOccurred()) +} + +func TestReconcileMachinesSyncOldMachines(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-machines-sync-old-machines") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, gmt := createClusterWithControlPlane(ns.Name) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + g.Expect(testEnv.Create(ctx, gmt)).To(Succeed()) + + desiredReplicas := 3 + kcp.Spec.Replicas = int32(desiredReplicas) + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, gmt, cluster, ns) + + kcpOwnerRef := *metav1.NewControllerRef(kcp, cpv1beta1.GroupVersion.WithKind("K0sControlPlane")) + + fakeClient := &restfake.RESTClient{ + Client: restfake.CreateHTTPClient(roundTripperForWorkloadClusterAPI), + } + + restClient, _ := rest.RESTClientFor(&rest.Config{ + ContentConfig: rest.ContentConfig{ + NegotiatedSerializer: scheme.Codecs, + GroupVersion: &metav1.SchemeGroupVersion, + }, + }) + restClient.Client = fakeClient.Client + + clientSet, err := kubernetes.NewForConfig(testEnv.Config) + g.Expect(err).ToNot(HaveOccurred()) + + r := &K0sController{ + Client: testEnv, + workloadClusterKubeClient: kubernetes.New(restClient), + ClientSet: clientSet, + } + + firstMachineRelatedToControlPlane := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d", kcp.Name, 0), + Namespace: ns.Name, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.MachineControlPlaneLabel: "true", + clusterv1.MachineControlPlaneNameLabel: kcp.GetName(), + }, + }, + Spec: clusterv1.MachineSpec{ + 
ClusterName: cluster.Name, + Version: ptr.To("v1.29.0"), + InfrastructureRef: v1.ObjectReference{ + Kind: "GenericInfrastructureMachineTemplate", + Namespace: ns.Name, + Name: gmt.GetName(), + APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", + }, + }, + } + firstMachineRelatedToControlPlane.SetOwnerReferences([]metav1.OwnerReference{kcpOwnerRef}) + g.Expect(testEnv.Create(ctx, firstMachineRelatedToControlPlane)).To(Succeed()) + firstControllerConfig := &bootstrapv1.K0sControllerConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d", kcp.Name, 0), + Namespace: ns.Name, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "cluster.x-k8s.io/v1beta1", + Kind: "Machine", + Name: firstMachineRelatedToControlPlane.GetName(), + UID: firstMachineRelatedToControlPlane.GetUID(), + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + }}, + }, + } + g.Expect(testEnv.Create(ctx, firstControllerConfig)).To(Succeed()) + + secondMachineRelatedToControlPlane := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d", kcp.Name, 1), + Namespace: ns.Name, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.MachineControlPlaneLabel: "true", + clusterv1.MachineControlPlaneNameLabel: kcp.GetName(), + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: cluster.Name, + Version: ptr.To("v1.30.0"), + InfrastructureRef: v1.ObjectReference{ + Kind: "GenericInfrastructureMachineTemplate", + Namespace: ns.Name, + Name: gmt.GetName(), + APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", + }, + }, + } + secondMachineRelatedToControlPlane.SetOwnerReferences([]metav1.OwnerReference{kcpOwnerRef}) + g.Expect(testEnv.Create(ctx, secondMachineRelatedToControlPlane)).To(Succeed()) + secondControllerConfig := &bootstrapv1.K0sControllerConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d", kcp.Name, 1), + Namespace: ns.Name, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "cluster.x-k8s.io/v1beta1", + Kind: "Machine", + Name: secondMachineRelatedToControlPlane.GetName(), + UID: secondMachineRelatedToControlPlane.GetUID(), + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + }}, + }, + } + g.Expect(testEnv.Create(ctx, secondControllerConfig)).To(Succeed()) + + thirdMachineRelatedToControlPlane := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d", kcp.Name, 2), + Namespace: ns.Name, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.MachineControlPlaneLabel: "true", + clusterv1.MachineControlPlaneNameLabel: kcp.GetName(), + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: cluster.Name, + Version: ptr.To("v1.29.0"), + InfrastructureRef: v1.ObjectReference{ + Kind: "GenericInfrastructureMachineTemplate", + Namespace: ns.Name, + Name: gmt.GetName(), + APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", + }, + }, + } + thirdMachineRelatedToControlPlane.SetOwnerReferences([]metav1.OwnerReference{kcpOwnerRef}) + g.Expect(testEnv.Create(ctx, thirdMachineRelatedToControlPlane)).To(Succeed()) + thirdControllerConfig := &bootstrapv1.K0sControllerConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d", kcp.Name, 2), + Namespace: ns.Name, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "cluster.x-k8s.io/v1beta1", + Kind: "Machine", + Name: thirdMachineRelatedToControlPlane.GetName(), + UID: thirdMachineRelatedToControlPlane.GetUID(), + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + }}, + }, + } + 
g.Expect(testEnv.Create(ctx, thirdControllerConfig)).To(Succeed()) + + g.Eventually(func(g Gomega) { + err = r.reconcileMachines(ctx, cluster, kcp) + g.Expect(err).ToNot(HaveOccurred()) + + machines, err := collections.GetFilteredMachinesForCluster(ctx, testEnv, cluster, collections.ControlPlaneMachines(cluster.Name), collections.ActiveMachines) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(machines).To(HaveLen(desiredReplicas)) + + k0sBootstrapConfigList := &bootstrapv1.K0sControllerConfigList{} + g.Expect(testEnv.GetAPIReader().List(ctx, k0sBootstrapConfigList, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(k0sBootstrapConfigList.Items).To(HaveLen(desiredReplicas)) + + for _, m := range machines { + expectedLabels := map[string]string{ + clusterv1.ClusterNameLabel: cluster.GetName(), + clusterv1.MachineControlPlaneLabel: "true", + clusterv1.MachineControlPlaneNameLabel: kcp.GetName(), + } + g.Expect(m.Labels).Should(Equal(expectedLabels)) + g.Expect(metav1.IsControlledBy(m, kcp)).To(BeTrue()) + g.Expect(*m.Spec.Version).Should(Equal(kcp.Spec.Version)) + + // verify that the bootrap config related to the existing machines is present. + bootstrapObjectKey := client.ObjectKey{ + Namespace: m.Namespace, + Name: m.Name, + } + kc := &bootstrapv1.K0sControllerConfig{} + g.Expect(testEnv.GetAPIReader().Get(ctx, bootstrapObjectKey, kc)).ToNot(HaveOccurred()) + g.Expect(metav1.IsControlledBy(kc, m)).To(BeTrue()) + } + }, 5*time.Second).Should(Succeed()) +} + +func TestReconcileInitializeControlPlanes(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "test-reconcile-initialize-controlplanes") + g.Expect(err).ToNot(HaveOccurred()) + + cluster, kcp, gmt := createClusterWithControlPlane(ns.Name) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + kcp.Spec.Replicas = 1 + g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + g.Expect(testEnv.Create(ctx, gmt)).To(Succeed()) + + defer func(do ...client.Object) { + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + }(kcp, gmt, cluster, ns) + + expectedLabels := map[string]string{clusterv1.ClusterNameLabel: cluster.Name} + + fakeClient := &restfake.RESTClient{ + Client: restfake.CreateHTTPClient(roundTripperForWorkloadClusterAPI), + } + + restClient, _ := rest.RESTClientFor(&rest.Config{ + ContentConfig: rest.ContentConfig{ + NegotiatedSerializer: scheme.Codecs, + GroupVersion: &metav1.SchemeGroupVersion, + }, + }) + restClient.Client = fakeClient.Client + + r := &K0sController{ + Client: testEnv, + workloadClusterKubeClient: kubernetes.New(restClient), + } + + _, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(testEnv.GetAPIReader().Get(ctx, client.ObjectKey{Name: kcp.Name, Namespace: kcp.Namespace}, kcp)).To(Succeed()) + g.Expect(kcp.Status.Selector).NotTo(BeEmpty()) + g.Expect(kcp.Status.Version).To(Equal(fmt.Sprintf("%s+%s", kcp.Spec.Version, defaultK0sSuffix))) + g.Expect(kcp.Status.Replicas).To(BeEquivalentTo(1)) + g.Expect(testEnv.GetAPIReader().Get(ctx, util.ObjectKey(gmt), gmt)).To(Succeed()) + g.Expect(gmt.GetOwnerReferences()).To(ContainElement(metav1.OwnerReference{ + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Cluster", + Name: cluster.Name, + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), + UID: cluster.UID, + })) + g.Expect(conditions.IsFalse(kcp, cpv1beta1.ControlPlaneReadyCondition)).To(BeTrue()) + + // Expected secrets are created + caSecret, err := secret.GetFromNamespacedName(ctx, 
testEnv, client.ObjectKey{Namespace: cluster.Namespace, Name: cluster.Name}, secret.ClusterCA) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(caSecret).NotTo(BeNil()) + g.Expect(caSecret.Data).NotTo(BeEmpty()) + g.Expect(caSecret.Labels).To(Equal(expectedLabels)) + + etcdSecret, err := secret.GetFromNamespacedName(ctx, testEnv, client.ObjectKey{Namespace: cluster.Namespace, Name: cluster.Name}, secret.EtcdCA) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(etcdSecret).NotTo(BeNil()) + g.Expect(etcdSecret.Data).NotTo(BeEmpty()) + g.Expect(etcdSecret.Labels).To(Equal(expectedLabels)) + + kubeconfigSecret, err := secret.GetFromNamespacedName(ctx, testEnv, client.ObjectKey{Namespace: cluster.Namespace, Name: cluster.Name}, secret.Kubeconfig) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(kubeconfigSecret).NotTo(BeNil()) + g.Expect(kubeconfigSecret.Data).NotTo(BeEmpty()) + g.Expect(kubeconfigSecret.Labels).To(Equal(expectedLabels)) + k, err := kubeconfig.FromSecret(ctx, testEnv, util.ObjectKey(cluster)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(k).NotTo(BeEmpty()) + + proxySecret, err := secret.GetFromNamespacedName(ctx, testEnv, client.ObjectKey{Namespace: cluster.Namespace, Name: cluster.Name}, secret.FrontProxyCA) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(proxySecret).NotTo(BeNil()) + g.Expect(proxySecret.Data).NotTo(BeEmpty()) + g.Expect(proxySecret.Labels).To(Equal(expectedLabels)) + + saSecret, err := secret.GetFromNamespacedName(ctx, testEnv, client.ObjectKey{Namespace: cluster.Namespace, Name: cluster.Name}, secret.ServiceAccount) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(saSecret).NotTo(BeNil()) + g.Expect(saSecret.Data).NotTo(BeEmpty()) + g.Expect(saSecret.Labels).To(Equal(expectedLabels)) + + machineList := &clusterv1.MachineList{} + g.Expect(testEnv.GetAPIReader().List(ctx, machineList, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(machineList.Items).To(HaveLen(1)) + machine := machineList.Items[0] + g.Expect(machine.Name).To(HavePrefix(kcp.Name)) + g.Expect(*machine.Spec.Version).To(Equal(fmt.Sprintf("%s+%s", kcp.Spec.Version, defaultK0sSuffix))) + // Newly cloned infra objects should have the infraref annotation. 
+ infraObj, err := external.Get(ctx, r.Client, &machine.Spec.InfrastructureRef, machine.Spec.InfrastructureRef.Namespace) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(infraObj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromNameAnnotation, gmt.GetName())) + g.Expect(infraObj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromGroupKindAnnotation, gmt.GroupVersionKind().GroupKind().String())) + + k0sBootstrapConfigList := &bootstrapv1.K0sControllerConfigList{} + g.Expect(testEnv.GetAPIReader().List(ctx, k0sBootstrapConfigList, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(k0sBootstrapConfigList.Items).To(HaveLen(1)) + + g.Expect(metav1.IsControlledBy(&k0sBootstrapConfigList.Items[0], &machine)).To(BeTrue()) + +} + +func roundTripperForWorkloadClusterAPI(req *http.Request) (*http.Response, error) { + header := http.Header{} + header.Set("Content-Type", runtime.ContentTypeJSON) + + switch req.Method { + case "GET": + if strings.HasPrefix(req.URL.Path, "/apis/autopilot.k0sproject.io/v1beta2/controlnodes/") { + res, err := json.Marshal(autopilot.ControlNode{}) + if err != nil { + return nil, err + } + return &http.Response{StatusCode: http.StatusOK, Header: header, Body: io.NopCloser(bytes.NewReader(res))}, nil + + } + case "DELETE": + if strings.HasPrefix(req.URL.Path, "/apis/autopilot.k0sproject.io/v1beta2/controlnodes/") { + return &http.Response{StatusCode: http.StatusOK, Header: header, Body: nil}, nil + } + case "PATCH": + switch { + case strings.HasPrefix(req.URL.Path, "/apis/etcd.k0sproject.io/v1beta1/etcdmembers/"): + { + return &http.Response{StatusCode: http.StatusOK, Header: header, Body: nil}, nil + } + case strings.HasPrefix(req.URL.Path, "/apis/autopilot.k0sproject.io/v1beta2/controlnodes/"): + { + return &http.Response{StatusCode: http.StatusOK, Header: header, Body: nil}, nil + } + case strings.HasPrefix(req.URL.Path, "/apis/infrastructure.cluster.x-k8s.io/v1beta1/namespaces/"): + { + return &http.Response{StatusCode: http.StatusOK, Header: header, Body: nil}, nil + } + } + } + + return &http.Response{StatusCode: http.StatusNotFound, Header: header, Body: nil}, nil +} + +func newCluster(namespacedName *types.NamespacedName) *clusterv1.Cluster { + return &clusterv1.Cluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "Cluster", + APIVersion: clusterv1.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespacedName.Namespace, + Name: namespacedName.Name, + }, + } +} + +func createNode() *v1.Node { + return &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Labels: map[string]string{"node-role.kubernetes.io/control-plane": ""}, + }, + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + { + Type: v1.NodeExternalIP, + Address: "1.1.1.1", + }, + }, + }, + } +} + +func createClusterWithControlPlane(namespace string) (*clusterv1.Cluster, *cpv1beta1.K0sControlPlane, *unstructured.Unstructured) { + kcpName := fmt.Sprintf("kcp-foo-%s", util.RandomString(6)) + + cluster := newCluster(&types.NamespacedName{Name: kcpName, Namespace: namespace}) + cluster.Spec = clusterv1.ClusterSpec{ + ControlPlaneRef: &v1.ObjectReference{ + Kind: "K0sControlPlane", + Namespace: namespace, + Name: kcpName, + APIVersion: cpv1beta1.GroupVersion.String(), + }, + ControlPlaneEndpoint: clusterv1.APIEndpoint{ + Host: "test.endpoint", + Port: 6443, + }, + } + + kcp := &cpv1beta1.K0sControlPlane{ + TypeMeta: metav1.TypeMeta{ + APIVersion: cpv1beta1.GroupVersion.String(), + Kind: "K0sControlPlane", + }, + ObjectMeta: 
+			Name: kcpName,
+			Namespace: namespace,
+			OwnerReferences: []metav1.OwnerReference{
+				{
+					Kind: "Cluster",
+					APIVersion: clusterv1.GroupVersion.String(),
+					Name: kcpName,
+					UID: "1",
+				},
+			},
+		},
+		Spec: v1beta1.K0sControlPlaneSpec{
+			MachineTemplate: &v1beta1.K0sControlPlaneMachineTemplate{
+				InfrastructureRef: v1.ObjectReference{
+					Kind: "GenericInfrastructureMachineTemplate",
+					Namespace: namespace,
+					Name: "infra-foo",
+					APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
+				},
+			},
+			UpdateStrategy: cpv1beta1.UpdateRecreate,
+			Replicas: int32(1),
+			Version: "v1.30.0",
+		},
+	}
+
+	genericMachineTemplate := &unstructured.Unstructured{
+		Object: map[string]interface{}{
+			"kind": "GenericInfrastructureMachineTemplate",
+			"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
+			"metadata": map[string]interface{}{
+				"name": "infra-foo",
+				"namespace": namespace,
+				"annotations": map[string]interface{}{
+					clusterv1.TemplateClonedFromNameAnnotation: kcp.Spec.MachineTemplate.InfrastructureRef.Name,
+					clusterv1.TemplateClonedFromGroupKindAnnotation: kcp.Spec.MachineTemplate.InfrastructureRef.GroupVersionKind().GroupKind().String(),
+				},
+			},
+			"spec": map[string]interface{}{
+				"template": map[string]interface{}{
+					"spec": map[string]interface{}{
+						"hello": "world",
+					},
+				},
+			},
+		},
+	}
+	return cluster, kcp, genericMachineTemplate
+}
diff --git a/internal/controller/controlplane/suite_test.go b/internal/controller/controlplane/suite_test.go
new file mode 100644
index 000000000..cf2af4a22
--- /dev/null
+++ b/internal/controller/controlplane/suite_test.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2024.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controlplane
+
+import (
+	"os"
+	"testing"
+
+	"github.com/k0sproject/k0smotron/internal/test/envtest"
+	ctrl "sigs.k8s.io/controller-runtime"
+)
+
+var (
+	testEnv *envtest.Environment
+	ctx     = ctrl.SetupSignalHandler()
+)
+
+func TestMain(m *testing.M) {
+	testEnv = envtest.Build(ctx)
+	code := m.Run()
+	testEnv.Teardown()
+	os.Exit(code)
+}
diff --git a/internal/controller/controlplane/util.go b/internal/controller/controlplane/util.go
index 91e4543ba..4935447a3 100644
--- a/internal/controller/controlplane/util.go
+++ b/internal/controller/controlplane/util.go
@@ -137,6 +137,10 @@ func (c *K0sController) regenerateKubeconfigSecret(ctx context.Context, kubeconf
 }
 
 func (c *K0sController) getKubeClient(ctx context.Context, cluster *clusterv1.Cluster) (*kubernetes.Clientset, error) {
+	if c.workloadClusterKubeClient != nil {
+		return c.workloadClusterKubeClient, nil
+	}
+
 	return k0smoutil.GetKubeClient(ctx, c.Client, cluster)
 }
 
diff --git a/internal/test/envtest/environment.go b/internal/test/envtest/environment.go
new file mode 100644
index 000000000..31fc14ad5
--- /dev/null
+++ b/internal/test/envtest/environment.go
@@ -0,0 +1,274 @@
+/*
+Copyright 2024.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package envtest
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path"
+	"path/filepath"
+	goruntime "runtime"
+	"time"
+
+	"github.com/onsi/ginkgo/v2"
+	"github.com/pkg/errors"
+	"golang.org/x/tools/go/packages"
+
+	v1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	kerrors "k8s.io/apimachinery/pkg/util/errors"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/klog/v2"
+	"sigs.k8s.io/cluster-api/cmd/clusterctl/log"
+	"sigs.k8s.io/cluster-api/util/kubeconfig"
+
+	bootstrapv1beta1 "github.com/k0sproject/k0smotron/api/bootstrap/v1beta1"
+	controlplanev1beta1 "github.com/k0sproject/k0smotron/api/controlplane/v1beta1"
+	infrastructurev1beta1 "github.com/k0sproject/k0smotron/api/infrastructure/v1beta1"
+	k0smotronv1beta1 "github.com/k0sproject/k0smotron/api/k0smotron.io/v1beta1"
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/envtest"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
+)
+
+var (
+	cacheSyncBackoff = wait.Backoff{
+		Duration: 100 * time.Millisecond,
+		Factor:   1.5,
+		Steps:    8,
+		Jitter:   0.4,
+	}
+)
+
+// Environment encapsulates a Kubernetes local test environment.
+type Environment struct {
+	manager.Manager
+	client.Client
+	Config *rest.Config
+	env    *envtest.Environment
+	cancel context.CancelFunc
+}
+
+func init() {
+	logger := klog.Background()
+	// Use klog as the internal logger for this envtest environment.
+	log.SetLogger(logger)
+	// Additionally force all controllers to use the same logger.
+	ctrl.SetLogger(logger)
+	// Route klog output to the Ginkgo writer.
+	klog.SetOutput(ginkgo.GinkgoWriter)
+
+	utilruntime.Must(apiextensionsv1.AddToScheme(scheme.Scheme))
+	utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme))
+	utilruntime.Must(k0smotronv1beta1.AddToScheme(scheme.Scheme))
+	utilruntime.Must(bootstrapv1beta1.AddToScheme(scheme.Scheme))
+	utilruntime.Must(controlplanev1beta1.AddToScheme(scheme.Scheme))
+	utilruntime.Must(infrastructurev1beta1.AddToScheme(scheme.Scheme))
+}
+
+func newEnvironment() *Environment {
+	_, filename, _, _ := goruntime.Caller(0)
+	root := path.Join(path.Dir(filename), "..", "..", "..")
+
+	capiCoreCrdsPath := ""
+	if capiCoreCrdsPath = getFilePathToCAPICoreCRDs(); capiCoreCrdsPath == "" {
+		panic(fmt.Errorf("failed to retrieve cluster-api core crds path"))
+	}
+
+	crdPaths := []string{
+		capiCoreCrdsPath,
+		filepath.Join(root, "config", "clusterapi", "bootstrap", "bases"),
+		filepath.Join(root, "config", "clusterapi", "controlplane", "bases"),
+		filepath.Join(root, "config", "clusterapi", "infrastructure", "bases"),
+		filepath.Join(root, "config", "clusterapi", "k0smotron.io", "bases"),
+	}
+
+	env := &envtest.Environment{
+		ErrorIfCRDPathMissing: true,
+		CRDDirectoryPaths:     crdPaths,
+		CRDs: []*apiextensionsv1.CustomResourceDefinition{
+			genericInfrastructureMachineCRD,
+			genericInfrastructureMachineTemplateCRD,
+		},
+	}
+
+	if _, err := env.Start(); err != nil {
+		panic(err)
+	}
+
+	options := manager.Options{
+		Scheme: scheme.Scheme,
+		Metrics: metricsserver.Options{
+			BindAddress: "0",
+		},
+		Client: client.Options{
+			Cache: &client.CacheOptions{
+				DisableFor: []client.Object{
+					&v1.ConfigMap{},
+					&v1.Secret{},
+				},
+				// Use the cache for all Unstructured get/list calls.
+				Unstructured: true,
+			},
+		},
+	}
+
+	mgr, err := ctrl.NewManager(env.Config, options)
+	if err != nil {
+		panic(fmt.Errorf("failed to start testenv manager: %w", err))
+	}
+
+	if kubeconfigPath := os.Getenv("CAPI_TEST_ENV_KUBECONFIG"); kubeconfigPath != "" {
+		klog.Infof("Writing test env kubeconfig to %q", kubeconfigPath)
+		config := kubeconfig.FromEnvTestConfig(env.Config, &clusterv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{Name: "test"},
+		})
+		if err := os.WriteFile(kubeconfigPath, config, 0o600); err != nil {
+			panic(errors.Wrapf(err, "failed to write the test env kubeconfig"))
+		}
+	}
+
+	return &Environment{
+		Manager: mgr,
+		Client:  mgr.GetClient(),
+		Config:  mgr.GetConfig(),
+		env:     env,
+	}
+}
+
+func Build(ctx context.Context) *Environment {
+	testEnv := newEnvironment()
+	go func() {
+		fmt.Println("Starting the manager")
+		if err := testEnv.StartManager(ctx); err != nil {
+			panic(fmt.Sprintf("Failed to start the envtest manager: %v", err))
+		}
+	}()
+
+	return testEnv
+}
+
+func (e *Environment) Teardown() {
+	e.cancel()
+	if err := e.Stop(); err != nil {
+		panic(fmt.Sprintf("Failed to stop envtest: %v", err))
+	}
+}
+
+// StartManager starts the test controller against the local API server.
+func (e *Environment) StartManager(ctx context.Context) error {
+	ctx, cancel := context.WithCancel(ctx)
+	e.cancel = cancel
+	return e.Manager.Start(ctx)
+}
+
+// Stop stops the test environment.
+func (e *Environment) Stop() error {
+	e.cancel()
+	return e.env.Stop()
+}
+
+func getFilePathToCAPICoreCRDs() string {
+	packageName := "sigs.k8s.io/cluster-api"
+	packageConfig := &packages.Config{
+		Mode: packages.NeedModule,
+	}
+
+	pkgs, err := packages.Load(packageConfig, packageName)
+	if err != nil {
+		return ""
+	}
+
+	return filepath.Join(pkgs[0].Module.Dir, "config", "crd", "bases")
+}
+
+// CreateNamespace creates a new namespace with a generated name.
+func (e *Environment) CreateNamespace(ctx context.Context, generateName string) (*v1.Namespace, error) {
+	ns := &v1.Namespace{
+		ObjectMeta: metav1.ObjectMeta{
+			GenerateName: fmt.Sprintf("%s-", generateName),
+			Labels: map[string]string{
+				"testenv/original-name": generateName,
+			},
+		},
+	}
+	if err := e.Client.Create(ctx, ns); err != nil {
+		return nil, err
+	}
+
+	return ns, nil
+}
+
+// Cleanup removes objects from the Environment.
+func (e *Environment) Cleanup(ctx context.Context, objs ...client.Object) error {
+	errs := make([]error, 0, len(objs))
+	for _, o := range objs {
+		err := e.Client.Delete(ctx, o)
+		if apierrors.IsNotFound(err) {
+			// If the object is not found, it must've been garbage collected
+			// already. For example, if we delete namespace first and then
+			// objects within it.
+			continue
+		}
+		errs = append(errs, err)
+	}
+
+	return kerrors.NewAggregate(errs)
+}
+
+// CleanupAndWait deletes all the given objects and waits for the cache to be updated accordingly.
+//
+// NOTE: Waiting for the cache to be updated helps in preventing test flakes due to the cache sync delays.
+func (e *Environment) CleanupAndWait(ctx context.Context, objs ...client.Object) error {
+	if err := e.Cleanup(ctx, objs...); err != nil {
+		return err
+	}
+
+	// Makes sure the cache is updated with the deleted object
+	errs := []error{}
+	for _, o := range objs {
+		// Ignoring namespaces because in testenv the namespace cleaner is not running.
+		if o.GetObjectKind().GroupVersionKind().GroupKind() == v1.SchemeGroupVersion.WithKind("Namespace").GroupKind() {
+			continue
+		}
+
+		oCopy := o.DeepCopyObject().(client.Object)
+		key := client.ObjectKeyFromObject(o)
+		err := wait.ExponentialBackoff(
+			cacheSyncBackoff,
+			func() (done bool, err error) {
+				if err := e.Get(ctx, key, oCopy); err != nil {
+					if apierrors.IsNotFound(err) {
+						return true, nil
+					}
+					return false, err
+				}
+				return false, nil
+			})
+		errs = append(errs, errors.Wrapf(err, "key %s, %s is not being deleted from the testenv client cache", o.GetObjectKind().GroupVersionKind().String(), key))
+	}
+	return kerrors.NewAggregate(errs)
+}
diff --git a/internal/test/envtest/generic_provider.go b/internal/test/envtest/generic_provider.go
new file mode 100644
index 000000000..6e54ac023
--- /dev/null
+++ b/internal/test/envtest/generic_provider.go
@@ -0,0 +1,73 @@
+package envtest
+
+import (
+	"strings"
+
+	"github.com/gobuffalo/flect"
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/utils/ptr"
+
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	"sigs.k8s.io/cluster-api/util/contract"
+)
+
+var (
+	// infrastructureGroupVersion is group version used for infrastructure objects.
+	infrastructureGroupVersion = schema.GroupVersion{Group: "infrastructure.cluster.x-k8s.io", Version: "v1beta1"}
+
+	// genericInfrastructureMachineCRD is a generic infrastructure machine CRD.
+	genericInfrastructureMachineCRD = generateCRD(infrastructureGroupVersion.WithKind("GenericInfrastructureMachine"))
+
+	// genericInfrastructureMachineTemplateCRD is a generic infrastructure machine template CRD.
+	genericInfrastructureMachineTemplateCRD = generateCRD(infrastructureGroupVersion.WithKind("GenericInfrastructureMachineTemplate"))
+)
+
+func generateCRD(gvk schema.GroupVersionKind) *apiextensionsv1.CustomResourceDefinition {
+	return &apiextensionsv1.CustomResourceDefinition{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: apiextensionsv1.SchemeGroupVersion.String(),
+			Kind:       "CustomResourceDefinition",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: contract.CalculateCRDName(gvk.Group, gvk.Kind),
+			Labels: map[string]string{
+				clusterv1.GroupVersion.String(): "v1beta1",
+			},
+		},
+		Spec: apiextensionsv1.CustomResourceDefinitionSpec{
+			Group: gvk.Group,
+			Scope: apiextensionsv1.NamespaceScoped,
+			Names: apiextensionsv1.CustomResourceDefinitionNames{
+				Kind:   gvk.Kind,
+				Plural: flect.Pluralize(strings.ToLower(gvk.Kind)),
+			},
+			Versions: []apiextensionsv1.CustomResourceDefinitionVersion{
+				{
+					Name:    gvk.Version,
+					Served:  true,
+					Storage: true,
+					Subresources: &apiextensionsv1.CustomResourceSubresources{
+						Status: &apiextensionsv1.CustomResourceSubresourceStatus{},
+					},
+					Schema: &apiextensionsv1.CustomResourceValidation{
+						OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{
+							Type: "object",
+							Properties: map[string]apiextensionsv1.JSONSchemaProps{
+								"spec": {
+									Type:                   "object",
+									XPreserveUnknownFields: ptr.To(true),
+								},
+								"status": {
+									Type:                   "object",
+									XPreserveUnknownFields: ptr.To(true),
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+}
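
Editor's note (not part of the patch): the sketch below illustrates how a test in internal/controller/controlplane might combine the new envtest helpers (Build via TestMain, CreateNamespace, CleanupAndWait) with the generic infrastructure template CRD registered by generic_provider.go. The test name, namespace prefix, and object names are hypothetical; it relies only on the testEnv and ctx package variables introduced in suite_test.go.

package controlplane

import (
	"testing"

	. "github.com/onsi/gomega"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func TestEnvironmentUsageSketch(t *testing.T) {
	g := NewWithT(t)

	// Create an isolated, generated namespace for this test.
	ns, err := testEnv.CreateNamespace(ctx, "example")
	g.Expect(err).ToNot(HaveOccurred())

	// The GenericInfrastructureMachineTemplate CRD installed by the test
	// environment lets us create provider-agnostic templates as unstructured objects.
	gmt := &unstructured.Unstructured{Object: map[string]interface{}{
		"kind":       "GenericInfrastructureMachineTemplate",
		"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
		"metadata": map[string]interface{}{
			"name":      "example-template",
			"namespace": ns.Name,
		},
		"spec": map[string]interface{}{
			"template": map[string]interface{}{
				"spec": map[string]interface{}{},
			},
		},
	}}
	g.Expect(testEnv.Create(ctx, gmt)).To(Succeed())

	// Delete the objects and wait until the cached client no longer sees them,
	// which avoids flakes caused by cache sync delays.
	g.Expect(testEnv.CleanupAndWait(ctx, gmt, ns)).To(Succeed())
}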