diff --git a/.golangci.yml b/.golangci.yml
index 9914d416e7c..4847d0d702e 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -101,14 +101,26 @@ linters:
         - pkg: sigs.k8s.io/controller-runtime
           alias: ctrl
         # CAPI
-        - pkg: sigs.k8s.io/cluster-api/api/v1beta1
+        - pkg: sigs.k8s.io/cluster-api/api/core/v1beta2
           alias: clusterv1
-        # CAPI exp
-        - pkg: sigs.k8s.io/cluster-api/exp/api/v1beta1
-          alias: expv1
+        - pkg: sigs.k8s.io/cluster-api/api/core/v1beta1
+          alias: clusterv1beta1
+        - pkg: sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions
+          alias: v1beta1conditions
+        - pkg: sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2
+          alias: v1beta2conditions
+        - pkg: sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch
+          alias: v1beta1patch
+        # CAPBK
+        - pkg: sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2
+          alias: bootstrapv1
+        - pkg: sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta1
+          alias: bootstrapv1beta1
        # CAPZ
         - pkg: sigs.k8s.io/cluster-api-provider-azure/api/v1beta1
           alias: infrav1
+        - pkg: sigs.k8s.io/cluster-api-provider-azure/util/v1beta1
+          alias: clusterv1beta1util
         # CAPZ exp
         - pkg: sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1
           alias: infrav1exp
diff --git a/Makefile b/Makefile
index 3e3ab10532e..12d8367858a 100644
--- a/Makefile
+++ b/Makefile
@@ -34,7 +34,7 @@ export GOPROXY
 export GO111MODULE=on
 
 # Kubebuilder.
-export KUBEBUILDER_ENVTEST_KUBERNETES_VERSION ?= 1.32.0
+export KUBEBUILDER_ENVTEST_KUBERNETES_VERSION ?= 1.34.0
 export KUBEBUILDER_CONTROLPLANE_START_TIMEOUT ?= 60s
 export KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT ?= 60s
 
@@ -70,11 +70,11 @@ ifneq ($(abspath $(ROOT_DIR)),$(GOPATH)/src/sigs.k8s.io/cluster-api-provider-azu
 endif
 
 # Binaries.
-CONTROLLER_GEN_VER := v0.16.1
+CONTROLLER_GEN_VER := v0.18.0
 CONTROLLER_GEN_BIN := controller-gen
 CONTROLLER_GEN := $(TOOLS_BIN_DIR)/$(CONTROLLER_GEN_BIN)-$(CONTROLLER_GEN_VER)
 
-CONVERSION_GEN_VER := v0.31.0
+CONVERSION_GEN_VER := v0.33.0
 CONVERSION_GEN_BIN := conversion-gen
 CONVERSION_GEN := $(TOOLS_BIN_DIR)/$(CONVERSION_GEN_BIN)-$(CONVERSION_GEN_VER)
 
@@ -142,7 +142,7 @@ CODESPELL_BIN := codespell
 CODESPELL_DIST_DIR := codespell_dist
 CODESPELL := $(TOOLS_BIN_DIR)/$(CODESPELL_DIST_DIR)/$(CODESPELL_BIN)
 
-SETUP_ENVTEST_VER := release-0.19
+SETUP_ENVTEST_VER := release-0.21
 SETUP_ENVTEST_BIN := setup-envtest
 SETUP_ENVTEST := $(abspath $(TOOLS_BIN_DIR)/$(SETUP_ENVTEST_BIN)-$(SETUP_ENVTEST_VER))
 SETUP_ENVTEST_PKG := sigs.k8s.io/controller-runtime/tools/setup-envtest
 
@@ -358,7 +358,7 @@ create-management-cluster: $(KUSTOMIZE) $(ENVSUBST) $(KUBECTL) $(KIND) ## Create
 	./hack/create-custom-cloud-provider-config.sh
 
 	# Deploy CAPI
-	timeout --foreground 300 bash -c "until curl --retry $(CURL_RETRIES) -sSL https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.10.7/cluster-api-components.yaml | $(ENVSUBST) | $(KUBECTL) apply -f -; do sleep 5; done"
+	timeout --foreground 300 bash -c "until curl --retry $(CURL_RETRIES) -sSL https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.11.2/cluster-api-components.yaml | $(ENVSUBST) | $(KUBECTL) apply -f -; do sleep 5; done"
 
 	# Deploy CAAPH
 	timeout --foreground 300 bash -c "until curl --retry $(CURL_RETRIES) -sSL https://github.com/kubernetes-sigs/cluster-api-addon-provider-helm/releases/download/v0.4.1/addon-components.yaml | $(ENVSUBST) | $(KUBECTL) apply -f -; do sleep 5; done"
diff --git a/Tiltfile b/Tiltfile
index 41905ccdb37..a7259de6226 100644
--- a/Tiltfile
+++ b/Tiltfile
@@ -22,7 +22,7 @@ settings = {
     "deploy_cert_manager": True,
"preload_images_for_kind": True, "kind_cluster_name": "capz", - "capi_version": "v1.10.7", + "capi_version": "v1.11.2", "caaph_version": "v0.4.1", "cert_manager_version": "v1.19.1", "kubernetes_version": "v1.32.2", diff --git a/api/v1alpha1/azureasomanagedcluster_types.go b/api/v1alpha1/azureasomanagedcluster_types.go index 5d96edbcfff..9085ae885cb 100644 --- a/api/v1alpha1/azureasomanagedcluster_types.go +++ b/api/v1alpha1/azureasomanagedcluster_types.go @@ -18,7 +18,7 @@ package v1alpha1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( @@ -38,7 +38,7 @@ type AzureASOManagedClusterSpec struct { // Because this field is programmatically set by CAPZ after resource creation, we define it as +optional // in the API schema to permit resource admission. //+optional - ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"` + ControlPlaneEndpoint clusterv1beta1.APIEndpoint `json:"controlPlaneEndpoint"` } // AzureASOManagedClusterStatus defines the observed state of AzureASOManagedCluster. diff --git a/api/v1alpha1/azureasomanagedcontrolplane_types.go b/api/v1alpha1/azureasomanagedcontrolplane_types.go index 7e3a7210bee..8951211993a 100644 --- a/api/v1alpha1/azureasomanagedcontrolplane_types.go +++ b/api/v1alpha1/azureasomanagedcontrolplane_types.go @@ -18,7 +18,7 @@ package v1alpha1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // AzureASOManagedControlPlaneKind is the kind for AzureASOManagedControlPlane. @@ -51,7 +51,7 @@ type AzureASOManagedControlPlaneStatus struct { // ControlPlaneEndpoint represents the endpoint for the cluster's API server. //+optional - ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"` + ControlPlaneEndpoint clusterv1beta1.APIEndpoint `json:"controlPlaneEndpoint"` } //+kubebuilder:object:root=true diff --git a/api/v1beta1/azureasomanagedcluster_types.go b/api/v1beta1/azureasomanagedcluster_types.go index 3063586ab57..b42d22da923 100644 --- a/api/v1beta1/azureasomanagedcluster_types.go +++ b/api/v1beta1/azureasomanagedcluster_types.go @@ -18,7 +18,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( @@ -38,7 +38,7 @@ type AzureASOManagedClusterSpec struct { // Because this field is programmatically set by CAPZ after resource creation, we define it as +optional // in the API schema to permit resource admission. //+optional - ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"` + ControlPlaneEndpoint clusterv1beta1.APIEndpoint `json:"controlPlaneEndpoint"` } // AzureASOManagedClusterStatus defines the observed state of AzureASOManagedCluster. diff --git a/api/v1beta1/azureasomanagedcontrolplane_types.go b/api/v1beta1/azureasomanagedcontrolplane_types.go index c32c98f83c0..4a0cb985b20 100644 --- a/api/v1beta1/azureasomanagedcontrolplane_types.go +++ b/api/v1beta1/azureasomanagedcontrolplane_types.go @@ -18,7 +18,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // AzureASOManagedControlPlaneKind is the kind for AzureASOManagedControlPlane. 
@@ -51,7 +51,7 @@ type AzureASOManagedControlPlaneStatus struct {
 
 	// ControlPlaneEndpoint represents the endpoint for the cluster's API server.
 	//+optional
-	ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"`
+	ControlPlaneEndpoint clusterv1beta1.APIEndpoint `json:"controlPlaneEndpoint"`
 }
 
 // +kubebuilder:object:root=true
diff --git a/api/v1beta1/azurecluster_types.go b/api/v1beta1/azurecluster_types.go
index 8f4397b9ae8..08195026981 100644
--- a/api/v1beta1/azurecluster_types.go
+++ b/api/v1beta1/azurecluster_types.go
@@ -18,7 +18,7 @@ package v1beta1
 
 import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 )
 
 const (
@@ -53,7 +53,7 @@ type AzureClusterSpec struct {
 	// ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. It is not recommended to set
 	// this when creating an AzureCluster as CAPZ will set this for you. However, if it is set, CAPZ will not change it.
 	// +optional
-	ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint,omitempty"`
+	ControlPlaneEndpoint clusterv1beta1.APIEndpoint `json:"controlPlaneEndpoint,omitempty"`
 }
 
 // AzureClusterStatus defines the observed state of AzureCluster.
@@ -65,7 +65,7 @@ type AzureClusterStatus struct {
 	// See: https://learn.microsoft.com/azure/reliability/availability-zones-overview
 	// This list will be used by Cluster API to try and spread the machines across the failure domains.
 	// +optional
-	FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"`
+	FailureDomains clusterv1beta1.FailureDomains `json:"failureDomains,omitempty"`
 
 	// Ready is true when the provider resource is ready.
 	// +optional
@@ -73,7 +73,7 @@ type AzureClusterStatus struct {
 
 	// Conditions defines current service state of the AzureCluster.
 	// +optional
-	Conditions clusterv1.Conditions `json:"conditions,omitempty"`
+	Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"`
 
 	// LongRunningOperationStates saves the states for Azure long-running operations so they can be continued on the
 	// next reconciliation loop.
@@ -114,12 +114,12 @@ type AzureClusterList struct {
 }
 
 // GetConditions returns the list of conditions for an AzureCluster API object.
-func (c *AzureCluster) GetConditions() clusterv1.Conditions {
+func (c *AzureCluster) GetConditions() clusterv1beta1.Conditions {
 	return c.Status.Conditions
 }
 
 // SetConditions will set the given conditions on an AzureCluster object.
-func (c *AzureCluster) SetConditions(conditions clusterv1.Conditions) {
+func (c *AzureCluster) SetConditions(conditions clusterv1beta1.Conditions) {
 	c.Status.Conditions = conditions
 }
diff --git a/api/v1beta1/azurecluster_webhook_test.go b/api/v1beta1/azurecluster_webhook_test.go
index a7848e7db00..b73d382518f 100644
--- a/api/v1beta1/azurecluster_webhook_test.go
+++ b/api/v1beta1/azurecluster_webhook_test.go
@@ -20,7 +20,7 @@ import (
 	"testing"
 
 	. "github.com/onsi/gomega"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 )
 
 func TestAzureCluster_ValidateCreate(t *testing.T) {
@@ -38,7 +38,7 @@ func TestAzureCluster_ValidateCreate(t *testing.T) {
 			name: "azurecluster with pre-existing control plane endpoint - valid spec",
 			cluster: func() *AzureCluster {
 				cluster := createValidCluster()
-				cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{
+				cluster.Spec.ControlPlaneEndpoint = clusterv1beta1.APIEndpoint{
 					Host: "apiserver.example.com",
 					Port: 8443,
 				}
@@ -129,7 +129,7 @@ func TestAzureCluster_ValidateUpdate(t *testing.T) {
 			name: "azurecluster with pre-existing control plane endpoint - valid spec",
 			oldCluster: func() *AzureCluster {
 				cluster := createValidCluster()
-				cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{
+				cluster.Spec.ControlPlaneEndpoint = clusterv1beta1.APIEndpoint{
 					Host: "apiserver.example.com",
 					Port: 8443,
 				}
@@ -137,7 +137,7 @@ func TestAzureCluster_ValidateUpdate(t *testing.T) {
 			}(),
 			cluster: func() *AzureCluster {
 				cluster := createValidCluster()
-				cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{
+				cluster.Spec.ControlPlaneEndpoint = clusterv1beta1.APIEndpoint{
 					Host: "apiserver.example.io",
 					Port: 6443,
 				}
@@ -150,7 +150,7 @@ func TestAzureCluster_ValidateUpdate(t *testing.T) {
 			oldCluster: createValidCluster(),
 			cluster: func() *AzureCluster {
 				cluster := createValidCluster()
-				cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{
+				cluster.Spec.ControlPlaneEndpoint = clusterv1beta1.APIEndpoint{
 					Host: "apiserver.example.com",
 					Port: 8443,
 				}
diff --git a/api/v1beta1/azureclusteridentity_types.go b/api/v1beta1/azureclusteridentity_types.go
index 62ab41f0bd7..83559f407e2 100644
--- a/api/v1beta1/azureclusteridentity_types.go
+++ b/api/v1beta1/azureclusteridentity_types.go
@@ -19,7 +19,7 @@ package v1beta1
 import (
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 )
 
 // AllowedNamespaces defines the namespaces the clusters are allowed to use the identity from
@@ -89,7 +89,7 @@ type AzureClusterIdentitySpec struct {
 type AzureClusterIdentityStatus struct {
 	// Conditions defines current service state of the AzureClusterIdentity.
 	// +optional
-	Conditions clusterv1.Conditions `json:"conditions,omitempty"`
+	Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"`
 }
 
 // +kubebuilder:object:root=true
@@ -118,12 +118,12 @@ type AzureClusterIdentityList struct {
 }
 
 // GetConditions returns the list of conditions for an AzureClusterIdentity API object.
-func (c *AzureClusterIdentity) GetConditions() clusterv1.Conditions {
+func (c *AzureClusterIdentity) GetConditions() clusterv1beta1.Conditions {
 	return c.Status.Conditions
 }
 
 // SetConditions will set the given conditions on an AzureClusterIdentity object.
-func (c *AzureClusterIdentity) SetConditions(conditions clusterv1.Conditions) {
+func (c *AzureClusterIdentity) SetConditions(conditions clusterv1beta1.Conditions) {
 	c.Status.Conditions = conditions
 }
diff --git a/api/v1beta1/azuremachine_default.go b/api/v1beta1/azuremachine_default.go
index 80f5afcc399..6cbd17b2374 100644
--- a/api/v1beta1/azuremachine_default.go
+++ b/api/v1beta1/azuremachine_default.go
@@ -27,7 +27,7 @@ import (
 	"golang.org/x/crypto/ssh"
 	kerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/uuid"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	utilSSH "sigs.k8s.io/cluster-api-provider-azure/util/ssh"
@@ -172,7 +172,7 @@ func (s *AzureMachineSpec) SetNetworkInterfacesDefaults() {
 func GetOwnerAzureClusterNameAndNamespace(cli client.Client, clusterName string, namespace string, maxAttempts int) (azureClusterName string, azureClusterNamespace string, err error) {
 	ctx := context.Background()
 
-	ownerCluster := &clusterv1.Cluster{}
+	ownerCluster := &clusterv1beta1.Cluster{}
 	key := client.ObjectKey{
 		Namespace: namespace,
 		Name:      clusterName,
@@ -223,7 +223,7 @@ func (m *AzureMachine) SetDefaults(client client.Client) error {
 	}
 
 	// Fetch the Cluster.
-	clusterName, ok := m.Labels[clusterv1.ClusterNameLabel]
+	clusterName, ok := m.Labels[clusterv1beta1.ClusterNameLabel]
 	if !ok {
 		errs = append(errs, errors.Errorf("failed to fetch ClusterName for AzureMachine %s/%s", m.Namespace, m.Name))
 	}
diff --git a/api/v1beta1/azuremachine_default_test.go b/api/v1beta1/azuremachine_default_test.go
index 1b467d634ea..ba4485ebdbd 100644
--- a/api/v1beta1/azuremachine_default_test.go
+++ b/api/v1beta1/azuremachine_default_test.go
@@ -30,7 +30,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/utils/ptr"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
@@ -564,15 +564,15 @@ func (m mockClient) Get(ctx context.Context, key client.ObjectKey, obj client.Ob
 	switch obj := obj.(type) {
 	case *AzureCluster:
 		obj.Spec.SubscriptionID = "test-subscription-id"
-	case *clusterv1.Cluster:
-		obj.Spec = clusterv1.ClusterSpec{
+	case *clusterv1beta1.Cluster:
+		obj.Spec = clusterv1beta1.ClusterSpec{
 			InfrastructureRef: &corev1.ObjectReference{
 				Kind:      AzureClusterKind,
 				Name:      "test-cluster",
 				Namespace: "default",
 			},
-			ClusterNetwork: &clusterv1.ClusterNetwork{
-				Services: &clusterv1.NetworkRanges{
+			ClusterNetwork: &clusterv1beta1.ClusterNetwork{
+				Services: &clusterv1beta1.NetworkRanges{
 					CIDRBlocks: []string{"192.168.0.0/26"},
 				},
 			},
@@ -607,7 +607,7 @@ func hardcodedAzureMachineWithSSHKey(sshPublicKey string) *AzureMachine {
 	return &AzureMachine{
 		ObjectMeta: metav1.ObjectMeta{
 			Labels: map[string]string{
-				clusterv1.ClusterNameLabel: "test-cluster",
+				clusterv1beta1.ClusterNameLabel: "test-cluster",
 			},
 		},
 		Spec: AzureMachineSpec{
diff --git a/api/v1beta1/azuremachine_types.go b/api/v1beta1/azuremachine_types.go
index 090f24edace..402888f180f 100644
--- a/api/v1beta1/azuremachine_types.go
+++ b/api/v1beta1/azuremachine_types.go
@@ -20,7 +20,7 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 )
 
 const (
@@ -246,7 +246,7 @@ type AzureMachineStatus struct {
 	// Conditions defines current service state of the AzureMachine.
 	// +optional
-	Conditions clusterv1.Conditions `json:"conditions,omitempty"`
+	Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"`
 
 	// LongRunningOperationStates saves the states for Azure long-running operations so they can be continued on the
 	// next reconciliation loop.
@@ -297,12 +297,12 @@ type AzureMachineList struct {
 }
 
 // GetConditions returns the list of conditions for an AzureMachine API object.
-func (m *AzureMachine) GetConditions() clusterv1.Conditions {
+func (m *AzureMachine) GetConditions() clusterv1beta1.Conditions {
 	return m.Status.Conditions
 }
 
 // SetConditions will set the given conditions on an AzureMachine object.
-func (m *AzureMachine) SetConditions(conditions clusterv1.Conditions) {
+func (m *AzureMachine) SetConditions(conditions clusterv1beta1.Conditions) {
 	m.Status.Conditions = conditions
 }
diff --git a/api/v1beta1/azuremachine_webhook_test.go b/api/v1beta1/azuremachine_webhook_test.go
index fe0d1c13808..9f197e274a8 100644
--- a/api/v1beta1/azuremachine_webhook_test.go
+++ b/api/v1beta1/azuremachine_webhook_test.go
@@ -27,7 +27,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/utils/ptr"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
@@ -947,7 +947,7 @@ func (m mockDefaultClient) Get(ctx context.Context, key client.ObjectKey, obj cl
 	switch obj := obj.(type) {
 	case *AzureCluster:
 		obj.Spec.SubscriptionID = m.SubscriptionID
-	case *clusterv1.Cluster:
+	case *clusterv1beta1.Cluster:
 		obj.Spec.InfrastructureRef = &corev1.ObjectReference{
 			Kind:      AzureClusterKind,
 			Name:      "test-cluster",
@@ -972,7 +972,7 @@ func TestAzureMachine_Default(t *testing.T) {
 	publicKeyNotExistTest := test{machine: createMachineWithSSHPublicKey("")}
 	testObjectMeta := metav1.ObjectMeta{
 		Labels: map[string]string{
-			clusterv1.ClusterNameLabel: "test-cluster",
+			clusterv1beta1.ClusterNameLabel: "test-cluster",
 		},
 	}
diff --git a/api/v1beta1/azuremachinetemplate_types.go b/api/v1beta1/azuremachinetemplate_types.go
index c4403f19dac..88c5d433394 100644
--- a/api/v1beta1/azuremachinetemplate_types.go
+++ b/api/v1beta1/azuremachinetemplate_types.go
@@ -18,7 +18,7 @@ package v1beta1
 
 import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 )
 
 // AzureMachineTemplateSpec defines the desired state of AzureMachineTemplate.
@@ -54,7 +54,7 @@ func init() {
 // AzureMachineTemplateResource describes the data needed to create an AzureMachine from a template.
 type AzureMachineTemplateResource struct {
 	// +optional
-	ObjectMeta clusterv1.ObjectMeta `json:"metadata,omitempty"`
+	ObjectMeta clusterv1beta1.ObjectMeta `json:"metadata,omitempty"`
 	// Spec is the specification of the desired behavior of the machine.
 	Spec AzureMachineSpec `json:"spec"`
 }
diff --git a/api/v1beta1/azuremanagedcluster_types.go b/api/v1beta1/azuremanagedcluster_types.go
index 5a405633876..f3da4714fea 100644
--- a/api/v1beta1/azuremanagedcluster_types.go
+++ b/api/v1beta1/azuremanagedcluster_types.go
@@ -18,7 +18,7 @@ package v1beta1
 
 import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 )
 
 // AzureManagedClusterSpec defines the desired state of AzureManagedCluster.
@@ -28,7 +28,7 @@ type AzureManagedClusterSpec struct {
 	// Because this field is programmatically set by CAPZ after resource creation, we define it as +optional
 	// in the API schema to permit resource admission.
 	// +optional
-	ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"`
+	ControlPlaneEndpoint clusterv1beta1.APIEndpoint `json:"controlPlaneEndpoint"`
 }
 
 // AzureManagedClusterStatus defines the observed state of AzureManagedCluster.
diff --git a/api/v1beta1/azuremanagedcluster_webhook_test.go b/api/v1beta1/azuremanagedcluster_webhook_test.go
index ec4cc4b919d..c6999c6e635 100644
--- a/api/v1beta1/azuremanagedcluster_webhook_test.go
+++ b/api/v1beta1/azuremanagedcluster_webhook_test.go
@@ -22,7 +22,7 @@ import (
 	. "github.com/onsi/gomega"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	utilfeature "k8s.io/component-base/featuregate/testing"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	capifeature "sigs.k8s.io/cluster-api/feature"
 
 	"sigs.k8s.io/cluster-api-provider-azure/feature"
@@ -40,7 +40,7 @@ func TestAzureManagedCluster_ValidateUpdate(t *testing.T) {
 			oldAMC: &AzureManagedCluster{
 				ObjectMeta: metav1.ObjectMeta{},
 				Spec: AzureManagedClusterSpec{
-					ControlPlaneEndpoint: clusterv1.APIEndpoint{
+					ControlPlaneEndpoint: clusterv1beta1.APIEndpoint{
 						Host: "aks-8622-h4h26c44.hcp.eastus.azmk8s.io",
 					},
 				},
@@ -48,7 +48,7 @@ func TestAzureManagedCluster_ValidateUpdate(t *testing.T) {
 			amc: &AzureManagedCluster{
 				ObjectMeta: metav1.ObjectMeta{},
 				Spec: AzureManagedClusterSpec{
-					ControlPlaneEndpoint: clusterv1.APIEndpoint{
+					ControlPlaneEndpoint: clusterv1beta1.APIEndpoint{
 						Host: "aks-8622-h4h26c44.hcp.eastus.azmk8s.io",
 						Port: 443,
 					},
@@ -61,7 +61,7 @@ func TestAzureManagedCluster_ValidateUpdate(t *testing.T) {
 			oldAMC: &AzureManagedCluster{
 				ObjectMeta: metav1.ObjectMeta{},
 				Spec: AzureManagedClusterSpec{
-					ControlPlaneEndpoint: clusterv1.APIEndpoint{
+					ControlPlaneEndpoint: clusterv1beta1.APIEndpoint{
 						Port: 443,
 					},
 				},
@@ -69,7 +69,7 @@ func TestAzureManagedCluster_ValidateUpdate(t *testing.T) {
 			amc: &AzureManagedCluster{
 				ObjectMeta: metav1.ObjectMeta{},
 				Spec: AzureManagedClusterSpec{
-					ControlPlaneEndpoint: clusterv1.APIEndpoint{
+					ControlPlaneEndpoint: clusterv1beta1.APIEndpoint{
 						Host: "aks-8622-h4h26c44.hcp.eastus.azmk8s.io",
 						Port: 443,
 					},
@@ -102,7 +102,7 @@ func TestAzureManagedCluster_ValidateCreate(t *testing.T) {
 			name: "can set Spec.ControlPlaneEndpoint.Host during create (clusterctl move scenario)",
 			amc: &AzureManagedCluster{
 				Spec: AzureManagedClusterSpec{
-					ControlPlaneEndpoint: clusterv1.APIEndpoint{
+					ControlPlaneEndpoint: clusterv1beta1.APIEndpoint{
 						Host: "my-host",
 					},
 				},
@@ -113,7 +113,7 @@ func TestAzureManagedCluster_ValidateCreate(t *testing.T) {
 			name: "can set Spec.ControlPlaneEndpoint.Port during create (clusterctl move scenario)",
 			amc: &AzureManagedCluster{
 				Spec: AzureManagedClusterSpec{
-					ControlPlaneEndpoint: clusterv1.APIEndpoint{
+					ControlPlaneEndpoint: clusterv1beta1.APIEndpoint{
 						Port: 4443,
 					},
 				},
diff --git a/api/v1beta1/azuremanagedcontrolplane_default.go b/api/v1beta1/azuremanagedcontrolplane_default.go
index 66b628fed93..80d0fff6b53 100644
--- a/api/v1beta1/azuremanagedcontrolplane_default.go
+++ b/api/v1beta1/azuremanagedcontrolplane_default.go
@@ -23,7 +23,7 @@ import (
 	"golang.org/x/crypto/ssh"
 	"k8s.io/utils/ptr"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	ctrl "sigs.k8s.io/controller-runtime"
 
 	utilSSH "sigs.k8s.io/cluster-api-provider-azure/util/ssh"
@@ -43,7 +43,7 @@ const (
 // setDefaultResourceGroupName sets the default ResourceGroupName for an AzureManagedControlPlane.
 func (m *AzureManagedControlPlane) setDefaultResourceGroupName() {
 	if m.Spec.ResourceGroupName == "" {
-		if clusterName, ok := m.Labels[clusterv1.ClusterNameLabel]; ok {
+		if clusterName, ok := m.Labels[clusterv1beta1.ClusterNameLabel]; ok {
 			m.Spec.ResourceGroupName = clusterName
 		}
 	}
@@ -103,7 +103,7 @@ func (m *AzureManagedControlPlane) setDefaultSubnet() {
 func setDefaultFleetsMember(fleetsMember *FleetsMember, labels map[string]string) *FleetsMember {
 	result := fleetsMember.DeepCopy()
 	if fleetsMember != nil {
-		if clusterName, ok := labels[clusterv1.ClusterNameLabel]; ok && fleetsMember.Name == "" {
+		if clusterName, ok := labels[clusterv1beta1.ClusterNameLabel]; ok && fleetsMember.Name == "" {
 			result.Name = clusterName
 		}
 	}
diff --git a/api/v1beta1/azuremanagedcontrolplane_types.go b/api/v1beta1/azuremanagedcontrolplane_types.go
index 8938e26508c..0f4055684fe 100644
--- a/api/v1beta1/azuremanagedcontrolplane_types.go
+++ b/api/v1beta1/azuremanagedcontrolplane_types.go
@@ -18,7 +18,7 @@ package v1beta1
 
 import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 )
 
 const (
@@ -156,7 +156,7 @@ type AzureManagedControlPlaneSpec struct {
 	// ControlPlaneEndpoint represents the endpoint used to communicate with the control plane.
 	// Immutable, populated by the AKS API at create.
 	// +optional
-	ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint,omitempty"`
+	ControlPlaneEndpoint clusterv1beta1.APIEndpoint `json:"controlPlaneEndpoint,omitempty"`
 
 	// SSHPublicKey is a string literal containing an ssh public key base64 encoded.
 	// Use empty string to autogenerate new key. Use null value to not set key.
@@ -424,7 +424,7 @@ type AzureManagedControlPlaneStatus struct {
 
 	// Conditions defines current service state of the AzureManagedControlPlane.
 	// +optional
-	Conditions clusterv1.Conditions `json:"conditions,omitempty"`
+	Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"`
 
 	// LongRunningOperationStates saves the states for Azure long-running operations so they can be continued on the
 	// next reconciliation loop.
@@ -689,12 +689,12 @@ type AzureManagedControlPlaneList struct {
 }
 
 // GetConditions returns the list of conditions for an AzureManagedControlPlane API object.
-func (m *AzureManagedControlPlane) GetConditions() clusterv1.Conditions {
+func (m *AzureManagedControlPlane) GetConditions() clusterv1beta1.Conditions {
 	return m.Status.Conditions
 }
 
 // SetConditions will set the given conditions on an AzureManagedControlPlane object.
-func (m *AzureManagedControlPlane) SetConditions(conditions clusterv1.Conditions) {
+func (m *AzureManagedControlPlane) SetConditions(conditions clusterv1beta1.Conditions) {
 	m.Status.Conditions = conditions
 }
diff --git a/api/v1beta1/azuremanagedcontrolplane_webhook.go b/api/v1beta1/azuremanagedcontrolplane_webhook.go
index 3eff0314865..39abf1d34c8 100644
--- a/api/v1beta1/azuremanagedcontrolplane_webhook.go
+++ b/api/v1beta1/azuremanagedcontrolplane_webhook.go
@@ -30,7 +30,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/validation/field"
 	"k8s.io/utils/ptr"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
@@ -523,12 +523,12 @@ func validateManagedClusterNetwork(cli client.Client, labels map[string]string,
 	ctx := context.Background()
 
 	// Fetch the Cluster.
-	clusterName, ok := labels[clusterv1.ClusterNameLabel]
+	clusterName, ok := labels[clusterv1beta1.ClusterNameLabel]
 	if !ok {
 		return nil
 	}
 
-	ownerCluster := &clusterv1.Cluster{}
+	ownerCluster := &clusterv1beta1.Cluster{}
 	key := client.ObjectKey{
 		Namespace: namespace,
 		Name:      clusterName,
diff --git a/api/v1beta1/azuremanagedcontrolplane_webhook_test.go b/api/v1beta1/azuremanagedcontrolplane_webhook_test.go
index 3b25e3bc7fb..47c786b866b 100644
--- a/api/v1beta1/azuremanagedcontrolplane_webhook_test.go
+++ b/api/v1beta1/azuremanagedcontrolplane_webhook_test.go
@@ -24,7 +24,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/validation/field"
 	utilfeature "k8s.io/component-base/featuregate/testing"
 	"k8s.io/utils/ptr"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	capifeature "sigs.k8s.io/cluster-api/feature"
 
 	"sigs.k8s.io/cluster-api-provider-azure/feature"
@@ -38,7 +38,7 @@ func TestDefaultingWebhook(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "fooName",
 			Labels: map[string]string{
-				clusterv1.ClusterNameLabel: "fooCluster",
+				clusterv1beta1.ClusterNameLabel: "fooCluster",
 			},
 		},
 		Spec: AzureManagedControlPlaneSpec{
@@ -1560,7 +1560,7 @@ func TestAzureManagedControlPlane_ValidateCreate(t *testing.T) {
 			name: "set Spec.ControlPlaneEndpoint.Host during create (clusterctl move scenario)",
 			amcp: &AzureManagedControlPlane{
 				Spec: AzureManagedControlPlaneSpec{
-					ControlPlaneEndpoint: clusterv1.APIEndpoint{
+					ControlPlaneEndpoint: clusterv1beta1.APIEndpoint{
 						Host: "my-host",
 					},
 					SSHPublicKey: ptr.To(generateSSHPublicKey(true)),
@@ -1582,7 +1582,7 @@ func TestAzureManagedControlPlane_ValidateCreate(t *testing.T) {
 			name: "can set Spec.ControlPlaneEndpoint.Port during create (clusterctl move scenario)",
 			amcp: &AzureManagedControlPlane{
 				Spec: AzureManagedControlPlaneSpec{
-					ControlPlaneEndpoint: clusterv1.APIEndpoint{
+					ControlPlaneEndpoint: clusterv1beta1.APIEndpoint{
 						Port: 444,
 					},
 					SSHPublicKey: ptr.To(generateSSHPublicKey(true)),
@@ -4037,7 +4037,7 @@ func TestValidateAMCPVirtualNetwork(t *testing.T) {
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fooName",
 					Labels: map[string]string{
-						clusterv1.ClusterNameLabel: "fooCluster",
+						clusterv1beta1.ClusterNameLabel: "fooCluster",
 					},
 				},
 				Spec: AzureManagedControlPlaneSpec{
@@ -4066,7 +4066,7 @@ func TestValidateAMCPVirtualNetwork(t *testing.T) {
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fooName",
 					Labels: map[string]string{
-						clusterv1.ClusterNameLabel: "fooCluster",
+						clusterv1beta1.ClusterNameLabel: "fooCluster",
 					},
 				},
 				Spec: AzureManagedControlPlaneSpec{
@@ -4095,7 +4095,7 @@ func TestValidateAMCPVirtualNetwork(t *testing.T) {
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fooName",
 					Labels: map[string]string{
-						clusterv1.ClusterNameLabel: "fooCluster",
+						clusterv1beta1.ClusterNameLabel: "fooCluster",
 					},
 				},
 				Spec: AzureManagedControlPlaneSpec{
@@ -4124,7 +4124,7 @@ func TestValidateAMCPVirtualNetwork(t *testing.T) {
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fooName",
 					Labels: map[string]string{
-						clusterv1.ClusterNameLabel: "fooCluster",
+						clusterv1beta1.ClusterNameLabel: "fooCluster",
 					},
 				},
 				Spec: AzureManagedControlPlaneSpec{
diff --git a/api/v1beta1/azuremanagedmachinepool_types.go b/api/v1beta1/azuremanagedmachinepool_types.go
index 166dbb77953..de21f87f5d5 100644
--- a/api/v1beta1/azuremanagedmachinepool_types.go
+++ b/api/v1beta1/azuremanagedmachinepool_types.go
@@ -18,7 +18,7 @@ package v1beta1
 
 import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 )
 
 const (
@@ -473,7 +473,7 @@ type AzureManagedMachinePoolStatus struct {
 
 	// Conditions defines current service state of the AzureManagedControlPlane.
 	// +optional
-	Conditions clusterv1.Conditions `json:"conditions,omitempty"`
+	Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"`
 
 	// LongRunningOperationStates saves the states for Azure long-running operations so they can be continued on the
 	// next reconciliation loop.
@@ -513,12 +513,12 @@ type AzureManagedMachinePoolList struct {
 }
 
 // GetConditions returns the list of conditions for an AzureManagedMachinePool API object.
-func (m *AzureManagedMachinePool) GetConditions() clusterv1.Conditions {
+func (m *AzureManagedMachinePool) GetConditions() clusterv1beta1.Conditions {
 	return m.Status.Conditions
 }
 
 // SetConditions will set the given conditions on an AzureManagedMachinePool object.
-func (m *AzureManagedMachinePool) SetConditions(conditions clusterv1.Conditions) {
+func (m *AzureManagedMachinePool) SetConditions(conditions clusterv1beta1.Conditions) {
 	m.Status.Conditions = conditions
 }
diff --git a/api/v1beta1/azuremanagedmachinepool_webhook.go b/api/v1beta1/azuremanagedmachinepool_webhook.go
index 4437c15307e..56fc12f0179 100644
--- a/api/v1beta1/azuremanagedmachinepool_webhook.go
+++ b/api/v1beta1/azuremanagedmachinepool_webhook.go
@@ -30,7 +30,7 @@ import (
 	kerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/validation/field"
 	"k8s.io/utils/ptr"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	clusterctlv1alpha3 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -309,12 +309,12 @@ func validateLastSystemNodePool(cli client.Client, labels map[string]string, nam
 	ctx := context.Background()
 
 	// Fetch the Cluster.
-	clusterName, ok := labels[clusterv1.ClusterNameLabel]
+	clusterName, ok := labels[clusterv1beta1.ClusterNameLabel]
 	if !ok {
 		return nil
 	}
 
-	ownerCluster := &clusterv1.Cluster{}
+	ownerCluster := &clusterv1beta1.Cluster{}
 	key := client.ObjectKey{
 		Namespace: namespace,
 		Name:      clusterName,
@@ -335,8 +335,8 @@ func validateLastSystemNodePool(cli client.Client, labels map[string]string, nam
 
 	opt1 := client.InNamespace(namespace)
 	opt2 := client.MatchingLabels(map[string]string{
-		clusterv1.ClusterNameLabel: clusterName,
-		LabelAgentPoolMode:         string(NodePoolModeSystem),
+		clusterv1beta1.ClusterNameLabel: clusterName,
+		LabelAgentPoolMode:              string(NodePoolModeSystem),
 	})
 	ammpList := &AzureManagedMachinePoolList{}
diff --git a/api/v1beta1/azuremanagedmachinepool_webhook_test.go b/api/v1beta1/azuremanagedmachinepool_webhook_test.go
index eb41e4b0d6e..e3627a96002 100644
--- a/api/v1beta1/azuremanagedmachinepool_webhook_test.go
+++ b/api/v1beta1/azuremanagedmachinepool_webhook_test.go
@@ -25,7 +25,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	utilfeature "k8s.io/component-base/featuregate/testing"
 	"k8s.io/utils/ptr"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	clusterctlv1alpha3 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
 	capifeature "sigs.k8s.io/cluster-api/feature"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -1349,7 +1349,7 @@ func TestAzureManagedMachinePool_validateLastSystemNodePool(t *testing.T) {
 	tests := []struct {
 		name    string
 		ammp    *AzureManagedMachinePool
-		cluster *clusterv1.Cluster
+		cluster *clusterv1beta1.Cluster
 		wantErr bool
 	}{
 		{
@@ -1357,9 +1357,9 @@ func TestAzureManagedMachinePool_validateLastSystemNodePool(t *testing.T) {
 			// Note that Owner Cluster's deletion timestamp is nil and Owner cluster being paused does not matter anymore.
name: "AzureManagedMachinePool (AMMP) should be deleted if this AMMP has the annotation 'cluster.x-k8s.io/move-to-delete' with the owner cluster being paused and 'No' deletion timestamp", ammp: systemMachinePoolWithDeletionAnnotation, - cluster: &clusterv1.Cluster{ + cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ - Name: systemMachinePool.GetLabels()[clusterv1.ClusterNameLabel], + Name: systemMachinePool.GetLabels()[clusterv1beta1.ClusterNameLabel], Namespace: systemMachinePool.Namespace, Finalizers: finalizers, }, @@ -1370,9 +1370,9 @@ func TestAzureManagedMachinePool_validateLastSystemNodePool(t *testing.T) { // AzureManagedMachinePool will be deleted since Owner Cluster has been marked for deletion name: "AzureManagedMachinePool should be deleted since the Cluster is paused with a deletion timestamp", ammp: systemMachinePool, - cluster: &clusterv1.Cluster{ + cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ - Name: systemMachinePool.GetLabels()[clusterv1.ClusterNameLabel], + Name: systemMachinePool.GetLabels()[clusterv1beta1.ClusterNameLabel], Namespace: systemMachinePool.Namespace, DeletionTimestamp: &deletionTime, Finalizers: finalizers, @@ -1383,9 +1383,9 @@ func TestAzureManagedMachinePool_validateLastSystemNodePool(t *testing.T) { { name: "AzureManagedMachinePool should not be deleted without a deletion timestamp on Owner Cluster and having one system pool node(invalid delete)", ammp: systemMachinePool, - cluster: &clusterv1.Cluster{ + cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ - Name: systemMachinePool.GetLabels()[clusterv1.ClusterNameLabel], + Name: systemMachinePool.GetLabels()[clusterv1beta1.ClusterNameLabel], Namespace: systemMachinePool.Namespace, }, }, @@ -1394,9 +1394,9 @@ func TestAzureManagedMachinePool_validateLastSystemNodePool(t *testing.T) { { name: "AzureManagedMachinePool should be deleted when Cluster is set with a deletion timestamp having one system pool node(valid delete)", ammp: systemMachinePool, - cluster: &clusterv1.Cluster{ + cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ - Name: systemMachinePool.GetLabels()[clusterv1.ClusterNameLabel], + Name: systemMachinePool.GetLabels()[clusterv1beta1.ClusterNameLabel], Namespace: systemMachinePool.Namespace, DeletionTimestamp: &deletionTime, Finalizers: finalizers, @@ -1411,7 +1411,7 @@ func TestAzureManagedMachinePool_validateLastSystemNodePool(t *testing.T) { g := NewWithT(t) scheme := runtime.NewScheme() _ = AddToScheme(scheme) - _ = clusterv1.AddToScheme(scheme) + _ = clusterv1beta1.AddToScheme(scheme) fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(tc.cluster, tc.ammp).Build() err := validateLastSystemNodePool(fakeClient, tc.ammp.Spec.NodeLabels, tc.ammp.Namespace, tc.ammp.Annotations) if tc.wantErr { @@ -1439,14 +1439,14 @@ func getManagedMachinePoolWithSystemMode() *AzureManagedMachinePool { ObjectMeta: metav1.ObjectMeta{ Namespace: metav1.NamespaceDefault, Labels: map[string]string{ - clusterv1.ClusterNameLabel: "test-cluster", - LabelAgentPoolMode: string(NodePoolModeSystem), + clusterv1beta1.ClusterNameLabel: "test-cluster", + LabelAgentPoolMode: string(NodePoolModeSystem), }, }, Spec: AzureManagedMachinePoolSpec{ AzureManagedMachinePoolClassSpec: AzureManagedMachinePoolClassSpec{ NodeLabels: map[string]string{ - clusterv1.ClusterNameLabel: "test-cluster", + clusterv1beta1.ClusterNameLabel: "test-cluster", }, }, }, diff --git a/api/v1beta1/consts.go b/api/v1beta1/consts.go index ab8012c8e31..c6fa7ca8654 
100644 --- a/api/v1beta1/consts.go +++ b/api/v1beta1/consts.go @@ -16,12 +16,12 @@ limitations under the License. package v1beta1 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +import clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" // AzureCluster Conditions and Reasons. const ( // NetworkInfrastructureReadyCondition reports of current status of cluster infrastructure. - NetworkInfrastructureReadyCondition clusterv1.ConditionType = "NetworkInfrastructureReady" + NetworkInfrastructureReadyCondition clusterv1beta1.ConditionType = "NetworkInfrastructureReady" // NamespaceNotAllowedByIdentity used to indicate cluster in a namespace not allowed by identity. NamespaceNotAllowedByIdentity = "NamespaceNotAllowedByIdentity" ) @@ -29,9 +29,9 @@ const ( // AzureMachine Conditions and Reasons. const ( // VMRunningCondition reports on current status of the Azure VM. - VMRunningCondition clusterv1.ConditionType = "VMRunning" + VMRunningCondition clusterv1beta1.ConditionType = "VMRunning" // VMIdentitiesReadyCondition reports on the readiness of the Azure VM identities. - VMIdentitiesReadyCondition clusterv1.ConditionType = "VMIdentitiesReady" + VMIdentitiesReadyCondition clusterv1beta1.ConditionType = "VMIdentitiesReady" // VMCreatingReason used when the vm creation is in progress. VMCreatingReason = "VMCreating" // VMUpdatingReason used when the vm updating is in progress. @@ -47,7 +47,7 @@ const ( // WaitingForBootstrapDataReason used when machine is waiting for bootstrap data to be ready before proceeding. WaitingForBootstrapDataReason = "WaitingForBootstrapData" // BootstrapSucceededCondition reports the result of the execution of the bootstrap data on the machine. - BootstrapSucceededCondition clusterv1.ConditionType = "BootstrapSucceeded" + BootstrapSucceededCondition clusterv1beta1.ConditionType = "BootstrapSucceeded" // BootstrapInProgressReason is used to indicate the bootstrap data has not finished executing. BootstrapInProgressReason = "BootstrapInProgress" // BootstrapFailedReason is used to indicate the bootstrap process ran into an error. @@ -57,7 +57,7 @@ const ( // AzureMachinePool Conditions and Reasons. const ( // ScaleSetRunningCondition reports on current status of the Azure Scale Set. - ScaleSetRunningCondition clusterv1.ConditionType = "ScaleSetRunning" + ScaleSetRunningCondition clusterv1beta1.ConditionType = "ScaleSetRunning" // ScaleSetCreatingReason used when the scale set creation is in progress. ScaleSetCreatingReason = "ScaleSetCreating" // ScaleSetUpdatingReason used when the scale set updating is in progress. @@ -68,14 +68,14 @@ const ( ScaleSetProvisionFailedReason = "ScaleSetProvisionFailed" // ScaleSetDesiredReplicasCondition reports on the scaling state of the machine pool. - ScaleSetDesiredReplicasCondition clusterv1.ConditionType = "ScaleSetDesiredReplicas" + ScaleSetDesiredReplicasCondition clusterv1beta1.ConditionType = "ScaleSetDesiredReplicas" // ScaleSetScaleUpReason describes the machine pool scaling up. ScaleSetScaleUpReason = "ScaleSetScalingUp" // ScaleSetScaleDownReason describes the machine pool scaling down. ScaleSetScaleDownReason = "ScaleSetScalingDown" // ScaleSetModelUpdatedCondition reports on the model state of the pool. - ScaleSetModelUpdatedCondition clusterv1.ConditionType = "ScaleSetModelUpdated" + ScaleSetModelUpdatedCondition clusterv1beta1.ConditionType = "ScaleSetModelUpdated" // ScaleSetModelOutOfDateReason describes the machine pool model being out of date. 
ScaleSetModelOutOfDateReason = "ScaleSetModelOutOfDate" ) @@ -83,57 +83,57 @@ const ( // AzureManagedCluster Conditions and Reasons. const ( // ManagedClusterRunningCondition means the AKS cluster exists and is in a running state. - ManagedClusterRunningCondition clusterv1.ConditionType = "ManagedClusterRunning" + ManagedClusterRunningCondition clusterv1beta1.ConditionType = "ManagedClusterRunning" // AgentPoolsReadyCondition means the AKS agent pools exist and are ready to be used. - AgentPoolsReadyCondition clusterv1.ConditionType = "AgentPoolsReady" + AgentPoolsReadyCondition clusterv1beta1.ConditionType = "AgentPoolsReady" // AzureResourceAvailableCondition means the AKS cluster is healthy according to Azure's Resource Health API. - AzureResourceAvailableCondition clusterv1.ConditionType = "AzureResourceAvailable" + AzureResourceAvailableCondition clusterv1beta1.ConditionType = "AzureResourceAvailable" ) // Azure Services Conditions and Reasons. const ( // ResourceGroupReadyCondition means the resource group exists and is ready to be used. - ResourceGroupReadyCondition clusterv1.ConditionType = "ResourceGroupReady" + ResourceGroupReadyCondition clusterv1beta1.ConditionType = "ResourceGroupReady" // VNetReadyCondition means the virtual network exists and is ready to be used. - VNetReadyCondition clusterv1.ConditionType = "VNetReady" + VNetReadyCondition clusterv1beta1.ConditionType = "VNetReady" // VnetPeeringReadyCondition means the virtual network peerings exist and are ready to be used. - VnetPeeringReadyCondition clusterv1.ConditionType = "VnetPeeringReady" + VnetPeeringReadyCondition clusterv1beta1.ConditionType = "VnetPeeringReady" // SecurityGroupsReadyCondition means the security groups exist and are ready to be used. - SecurityGroupsReadyCondition clusterv1.ConditionType = "SecurityGroupsReady" + SecurityGroupsReadyCondition clusterv1beta1.ConditionType = "SecurityGroupsReady" // RouteTablesReadyCondition means the route tables exist and are ready to be used. - RouteTablesReadyCondition clusterv1.ConditionType = "RouteTablesReady" + RouteTablesReadyCondition clusterv1beta1.ConditionType = "RouteTablesReady" // PublicIPsReadyCondition means the public IPs exist and are ready to be used. - PublicIPsReadyCondition clusterv1.ConditionType = "PublicIPsReady" + PublicIPsReadyCondition clusterv1beta1.ConditionType = "PublicIPsReady" // NATGatewaysReadyCondition means the NAT gateways exist and are ready to be used. - NATGatewaysReadyCondition clusterv1.ConditionType = "NATGatewaysReady" + NATGatewaysReadyCondition clusterv1beta1.ConditionType = "NATGatewaysReady" // SubnetsReadyCondition means the subnets exist and are ready to be used. - SubnetsReadyCondition clusterv1.ConditionType = "SubnetsReady" + SubnetsReadyCondition clusterv1beta1.ConditionType = "SubnetsReady" // LoadBalancersReadyCondition means the load balancers exist and are ready to be used. - LoadBalancersReadyCondition clusterv1.ConditionType = "LoadBalancersReady" + LoadBalancersReadyCondition clusterv1beta1.ConditionType = "LoadBalancersReady" // PrivateDNSZoneReadyCondition means the private DNS zone exists and is ready to be used. - PrivateDNSZoneReadyCondition clusterv1.ConditionType = "PrivateDNSZoneReady" + PrivateDNSZoneReadyCondition clusterv1beta1.ConditionType = "PrivateDNSZoneReady" // PrivateDNSLinkReadyCondition means the private DNS links exist and are ready to be used. 
-	PrivateDNSLinkReadyCondition clusterv1.ConditionType = "PrivateDNSLinkReady"
+	PrivateDNSLinkReadyCondition clusterv1beta1.ConditionType = "PrivateDNSLinkReady"
 	// PrivateDNSRecordReadyCondition means the private DNS records exist and are ready to be used.
-	PrivateDNSRecordReadyCondition clusterv1.ConditionType = "PrivateDNSRecordReady"
+	PrivateDNSRecordReadyCondition clusterv1beta1.ConditionType = "PrivateDNSRecordReady"
 	// BastionHostReadyCondition means the bastion host exists and is ready to be used.
-	BastionHostReadyCondition clusterv1.ConditionType = "BastionHostReady"
+	BastionHostReadyCondition clusterv1beta1.ConditionType = "BastionHostReady"
 	// InboundNATRulesReadyCondition means the inbound NAT rules exist and are ready to be used.
-	InboundNATRulesReadyCondition clusterv1.ConditionType = "InboundNATRulesReady"
+	InboundNATRulesReadyCondition clusterv1beta1.ConditionType = "InboundNATRulesReady"
 	// AvailabilitySetReadyCondition means the availability set exists and is ready to be used.
-	AvailabilitySetReadyCondition clusterv1.ConditionType = "AvailabilitySetReady"
+	AvailabilitySetReadyCondition clusterv1beta1.ConditionType = "AvailabilitySetReady"
 	// RoleAssignmentReadyCondition means the role assignment exists and is ready to be used.
-	RoleAssignmentReadyCondition clusterv1.ConditionType = "RoleAssignmentReady"
+	RoleAssignmentReadyCondition clusterv1beta1.ConditionType = "RoleAssignmentReady"
 	// DisksReadyCondition means the disks exist and are ready to be used.
-	DisksReadyCondition clusterv1.ConditionType = "DisksReady"
+	DisksReadyCondition clusterv1beta1.ConditionType = "DisksReady"
 	// NetworkInterfaceReadyCondition means the network interfaces exist and are ready to be used.
-	NetworkInterfaceReadyCondition clusterv1.ConditionType = "NetworkInterfacesReady"
+	NetworkInterfaceReadyCondition clusterv1beta1.ConditionType = "NetworkInterfacesReady"
 	// PrivateEndpointsReadyCondition means the private endpoints exist and are ready to be used.
-	PrivateEndpointsReadyCondition clusterv1.ConditionType = "PrivateEndpointsReady"
+	PrivateEndpointsReadyCondition clusterv1beta1.ConditionType = "PrivateEndpointsReady"
 	// FleetReadyCondition means the Fleet exists and is ready to be used.
-	FleetReadyCondition clusterv1.ConditionType = "FleetReady"
+	FleetReadyCondition clusterv1beta1.ConditionType = "FleetReady"
 	// AKSExtensionsReadyCondition means the AKS Extensions exist and are ready to be used.
-	AKSExtensionsReadyCondition clusterv1.ConditionType = "AKSExtensionsReady"
+	AKSExtensionsReadyCondition clusterv1beta1.ConditionType = "AKSExtensionsReady"
 
 	// CreatingReason means the resource is being created.
 	CreatingReason = "Creating"
diff --git a/api/v1beta1/types_class.go b/api/v1beta1/types_class.go
index 452eab517f8..358cb1e6082 100644
--- a/api/v1beta1/types_class.go
+++ b/api/v1beta1/types_class.go
@@ -19,7 +19,7 @@ package v1beta1
 import (
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 )
 
 // AzureClusterClassSpec defines the AzureCluster properties that may be shared across several Azure clusters.
@@ -76,7 +76,7 @@ type AzureClusterClassSpec struct {
 	// which is a separated group of datacenters within a region.
 	// See: https://learn.microsoft.com/azure/reliability/availability-zones-overview
 	// +optional
-	FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"`
+	FailureDomains clusterv1beta1.FailureDomains `json:"failureDomains,omitempty"`
 }
 
 // AzureManagedControlPlaneClassSpec defines the AzureManagedControlPlane properties that may be shared across several azure managed control planes.
diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go
index 71792a2ce24..0ed74f0cbb9 100644
--- a/api/v1beta1/zz_generated.deepcopy.go
+++ b/api/v1beta1/zz_generated.deepcopy.go
@@ -24,7 +24,7 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-	apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	corev1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 )
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -1062,7 +1062,7 @@ func (in *AzureClusterClassSpec) DeepCopyInto(out *AzureClusterClassSpec) {
 	}
 	if in.FailureDomains != nil {
 		in, out := &in.FailureDomains, &out.FailureDomains
-		*out = make(apiv1beta1.FailureDomains, len(*in))
+		*out = make(corev1beta1.FailureDomains, len(*in))
 		for key, val := range *in {
 			(*out)[key] = *val.DeepCopy()
 		}
@@ -1164,7 +1164,7 @@ func (in *AzureClusterIdentityStatus) DeepCopyInto(out *AzureClusterIdentityStat
 	*out = *in
 	if in.Conditions != nil {
 		in, out := &in.Conditions, &out.Conditions
-		*out = make(apiv1beta1.Conditions, len(*in))
+		*out = make(corev1beta1.Conditions, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
@@ -1237,14 +1237,14 @@ func (in *AzureClusterStatus) DeepCopyInto(out *AzureClusterStatus) {
 	*out = *in
 	if in.FailureDomains != nil {
 		in, out := &in.FailureDomains, &out.FailureDomains
-		*out = make(apiv1beta1.FailureDomains, len(*in))
+		*out = make(corev1beta1.FailureDomains, len(*in))
 		for key, val := range *in {
 			(*out)[key] = *val.DeepCopy()
 		}
 	}
 	if in.Conditions != nil {
 		in, out := &in.Conditions, &out.Conditions
-		*out = make(apiv1beta1.Conditions, len(*in))
+		*out = make(corev1beta1.Conditions, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
@@ -1642,7 +1642,7 @@ func (in *AzureMachineStatus) DeepCopyInto(out *AzureMachineStatus) {
 	}
 	if in.Conditions != nil {
 		in, out := &in.Conditions, &out.Conditions
-		*out = make(apiv1beta1.Conditions, len(*in))
+		*out = make(corev1beta1.Conditions, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
@@ -2198,7 +2198,7 @@ func (in *AzureManagedControlPlaneStatus) DeepCopyInto(out *AzureManagedControlP
 	*out = *in
 	if in.Conditions != nil {
 		in, out := &in.Conditions, &out.Conditions
-		*out = make(apiv1beta1.Conditions, len(*in))
+		*out = make(corev1beta1.Conditions, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
@@ -2575,7 +2575,7 @@ func (in *AzureManagedMachinePoolStatus) DeepCopyInto(out *AzureManagedMachinePo
 	}
 	if in.Conditions != nil {
 		in, out := &in.Conditions, &out.Conditions
-		*out = make(apiv1beta1.Conditions, len(*in))
+		*out = make(corev1beta1.Conditions, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
diff --git a/azure/converters/resourcehealth.go b/azure/converters/resourcehealth.go
index c6aae20fe20..084ca7392e9 100644
--- a/azure/converters/resourcehealth.go
+++ b/azure/converters/resourcehealth.go
@@ -21,14 +21,14 @@ import (
 	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcehealth/armresourcehealth"
 	"k8s.io/utils/ptr"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	"sigs.k8s.io/cluster-api/util/conditions"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
 )
 
 // SDKAvailabilityStatusToCondition converts an Azure Resource Health availability status to a status condition.
-func SDKAvailabilityStatusToCondition(availStatus armresourcehealth.AvailabilityStatus) *clusterv1.Condition {
+func SDKAvailabilityStatusToCondition(availStatus armresourcehealth.AvailabilityStatus) *clusterv1beta1.Condition {
 	if availStatus.Properties == nil {
 		return conditions.FalseCondition(infrav1.AzureResourceAvailableCondition, "", "", "")
 	}
@@ -54,12 +54,12 @@ func SDKAvailabilityStatusToCondition(availStatus armresourcehealth.Availability
 		}
 	}
 
-	var severity clusterv1.ConditionSeverity
+	var severity clusterv1beta1.ConditionSeverity
 	switch ptr.Deref(availStatus.Properties.AvailabilityState, "") {
 	case armresourcehealth.AvailabilityStateValuesUnavailable:
-		severity = clusterv1.ConditionSeverityError
+		severity = clusterv1beta1.ConditionSeverityError
 	case armresourcehealth.AvailabilityStateValuesDegraded, armresourcehealth.AvailabilityStateValuesUnknown:
-		severity = clusterv1.ConditionSeverityWarning
+		severity = clusterv1beta1.ConditionSeverityWarning
 	}
 
 	var message string
diff --git a/azure/converters/resourcehealth_test.go b/azure/converters/resourcehealth_test.go
index b75ba1d0dcd..b0f42929fbe 100644
--- a/azure/converters/resourcehealth_test.go
+++ b/azure/converters/resourcehealth_test.go
@@ -23,19 +23,19 @@ import (
 	. "github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/utils/ptr"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 )
 
 func TestAzureAvailabilityStatusToCondition(t *testing.T) {
 	tests := []struct {
 		name     string
 		avail    armresourcehealth.AvailabilityStatus
-		expected *clusterv1.Condition
+		expected *clusterv1beta1.Condition
 	}{
 		{
 			name:  "empty",
 			avail: armresourcehealth.AvailabilityStatus{},
-			expected: &clusterv1.Condition{
+			expected: &clusterv1beta1.Condition{
 				Status: corev1.ConditionFalse,
 			},
 		},
@@ -46,7 +46,7 @@ func TestAzureAvailabilityStatusToCondition(t *testing.T) {
 					AvailabilityState: ptr.To(armresourcehealth.AvailabilityStateValuesAvailable),
 				},
 			},
-			expected: &clusterv1.Condition{
+			expected: &clusterv1beta1.Condition{
 				Status: corev1.ConditionTrue,
 			},
 		},
@@ -59,9 +59,9 @@ func TestAzureAvailabilityStatusToCondition(t *testing.T) {
 					Summary: ptr.To("The Summary"),
 				},
 			},
-			expected: &clusterv1.Condition{
+			expected: &clusterv1beta1.Condition{
 				Status:   corev1.ConditionFalse,
-				Severity: clusterv1.ConditionSeverityError,
+				Severity: clusterv1beta1.ConditionSeverityError,
 				Reason:   "ThisIsAReason",
 				Message:  "The Summary",
 			},
@@ -75,9 +75,9 @@ func TestAzureAvailabilityStatusToCondition(t *testing.T) {
 					Summary: ptr.To("The Summary"),
 				},
 			},
-			expected: &clusterv1.Condition{
+			expected: &clusterv1beta1.Condition{
 				Status:   corev1.ConditionFalse,
-				Severity: clusterv1.ConditionSeverityWarning,
+				Severity: clusterv1beta1.ConditionSeverityWarning,
 				Reason:   "TheReason",
 				Message:  "The Summary",
 			},
diff --git a/azure/interfaces.go b/azure/interfaces.go
index 009670ad422..4c8906aa3e1 100644
--- a/azure/interfaces.go
+++ b/azure/interfaces.go
@@ -25,7 +25,7 @@ import (
 	"github.com/Azure/azure-service-operator/v2/pkg/genruntime"
 	"github.com/go-logr/logr"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
@@ -102,9 +102,9 @@ type AsyncStatusUpdater interface {
 	SetLongRunningOperationState(*infrav1.Future)
 	GetLongRunningOperationState(string, string, string) *infrav1.Future
 	DeleteLongRunningOperationState(string, string, string)
-	UpdatePutStatus(clusterv1.ConditionType, string, error)
-	UpdateDeleteStatus(clusterv1.ConditionType, string, error)
-	UpdatePatchStatus(clusterv1.ConditionType, string, error)
+	UpdatePutStatus(clusterv1beta1.ConditionType, string, error)
+	UpdateDeleteStatus(clusterv1beta1.ConditionType, string, error)
+	UpdatePatchStatus(clusterv1beta1.ConditionType, string, error)
 	AsyncReconciler
 }
diff --git a/azure/mock_azure/azure_mock.go b/azure/mock_azure/azure_mock.go
index 7f65eb27d78..ed4c0abb5e6 100644
--- a/azure/mock_azure/azure_mock.go
+++ b/azure/mock_azure/azure_mock.go
@@ -37,7 +37,7 @@ import (
 	gomock "go.uber.org/mock/gomock"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
-	v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1"
+	v1beta10 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	client "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
diff --git a/azure/scope/cluster.go b/azure/scope/cluster.go
index c9760b5ad7e..eed2208699b 100644
--- a/azure/scope/cluster.go
+++ b/azure/scope/cluster.go
@@ -32,9 +32,9 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/utils/net"
 	"k8s.io/utils/ptr"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	"sigs.k8s.io/cluster-api/util/conditions"
-	"sigs.k8s.io/cluster-api/util/patch"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
@@ -60,7 +60,7 @@ import (
 type ClusterScopeParams struct {
 	AzureClients
 	Client       client.Client
-	Cluster      *clusterv1.Cluster
+	Cluster      *clusterv1beta1.Cluster
 	AzureCluster *infrav1.AzureCluster
 	Cache        *ClusterCache
 	Timeouts     azure.AsyncReconciler
@@ -93,7 +93,7 @@ func NewClusterScope(ctx context.Context, params ClusterScopeParams) (*ClusterSc
 		params.Cache = &ClusterCache{}
 	}
 
-	helper, err := patch.NewHelper(params.AzureCluster, params.Client)
+	helper, err := v1beta1patch.NewHelper(params.AzureCluster, params.Client)
 	if err != nil {
 		return nil, errors.Errorf("failed to init patch helper: %v", err)
 	}
@@ -112,11 +112,11 @@ func NewClusterScope(ctx context.Context, params ClusterScopeParams) (*ClusterSc
 // ClusterScope defines the basic context for an actuator to operate upon.
 type ClusterScope struct {
 	Client      client.Client
-	patchHelper *patch.Helper
+	patchHelper *v1beta1patch.Helper
 	cache       *ClusterCache
 	AzureClients
-	Cluster      *clusterv1.Cluster
+	Cluster      *clusterv1beta1.Cluster
 	AzureCluster *infrav1.AzureCluster
 	azure.AsyncReconciler
 }
@@ -927,7 +927,7 @@ func (s *ClusterScope) GenerateLegacyFQDN() (ip string, domain string) {
 // ListOptionsLabelSelector returns a ListOptions with a label selector for clusterName.
func (s *ClusterScope) ListOptionsLabelSelector() client.ListOption { return client.MatchingLabels(map[string]string{ - clusterv1.ClusterNameLabel: s.Cluster.Name, + clusterv1beta1.ClusterNameLabel: s.Cluster.Name, }) } @@ -936,13 +936,13 @@ func (s *ClusterScope) PatchObject(ctx context.Context) error { ctx, _, done := tele.StartSpanWithLogger(ctx, "scope.ClusterScope.PatchObject") defer done() - conditions.SetSummary(s.AzureCluster) + v1beta1conditions.SetSummary(s.AzureCluster) return s.patchHelper.Patch( ctx, s.AzureCluster, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ - clusterv1.ReadyCondition, + v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ + clusterv1beta1.ReadyCondition, infrav1.ResourceGroupReadyCondition, infrav1.RouteTablesReadyCondition, infrav1.NetworkInfrastructureReadyCondition, @@ -993,9 +993,9 @@ func (s *ClusterScope) APIServerHost() string { // SetFailureDomain sets a failure domain in a cluster's status by its id. // The provided failure domain spec may be overridden to false by cluster's spec property. -func (s *ClusterScope) SetFailureDomain(id string, spec clusterv1.FailureDomainSpec) { +func (s *ClusterScope) SetFailureDomain(id string, spec clusterv1beta1.FailureDomainSpec) { if s.AzureCluster.Status.FailureDomains == nil { - s.AzureCluster.Status.FailureDomains = make(clusterv1.FailureDomains) + s.AzureCluster.Status.FailureDomains = make(clusterv1beta1.FailureDomains) } if fd, ok := s.AzureCluster.Spec.FailureDomains[id]; ok && !fd.ControlPlane { @@ -1124,38 +1124,38 @@ func (s *ClusterScope) DeleteLongRunningOperationState(name, service, futureType } // UpdateDeleteStatus updates a condition on the AzureCluster status after a DELETE operation. -func (s *ClusterScope) UpdateDeleteStatus(condition clusterv1.ConditionType, service string, err error) { +func (s *ClusterScope) UpdateDeleteStatus(condition clusterv1beta1.ConditionType, service string, err error) { switch { case err == nil: - conditions.MarkFalse(s.AzureCluster, condition, infrav1.DeletedReason, clusterv1.ConditionSeverityInfo, "%s successfully deleted", service) + v1beta1conditions.MarkFalse(s.AzureCluster, condition, infrav1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "%s successfully deleted", service) case azure.IsOperationNotDoneError(err): - conditions.MarkFalse(s.AzureCluster, condition, infrav1.DeletingReason, clusterv1.ConditionSeverityInfo, "%s deleting", service) + v1beta1conditions.MarkFalse(s.AzureCluster, condition, infrav1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "%s deleting", service) default: - conditions.MarkFalse(s.AzureCluster, condition, infrav1.DeletionFailedReason, clusterv1.ConditionSeverityError, "%s failed to delete. err: %s", service, err.Error()) + v1beta1conditions.MarkFalse(s.AzureCluster, condition, infrav1.DeletionFailedReason, clusterv1beta1.ConditionSeverityError, "%s failed to delete. err: %s", service, err.Error()) } } // UpdatePutStatus updates a condition on the AzureCluster status after a PUT operation. 
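The PatchObject hunk above is the pattern every scope in this PR follows: summarize the per-service conditions into Ready, then patch with an explicit owned-conditions list. A minimal sketch under the aliases above; the wrapper function and its signature are illustrative, not CAPZ code:

```go
// Illustrative wrapper, not the CAPZ method itself. SetSummary rolls
// the per-service conditions up into Ready before the helper diffs the
// object; WithOwnedConditions names the condition types this
// controller owns, so the patch helper can resolve conflicts on them.
func patchAzureCluster(ctx context.Context, helper *v1beta1patch.Helper, azureCluster *infrav1.AzureCluster) error {
	v1beta1conditions.SetSummary(azureCluster)
	return helper.Patch(
		ctx,
		azureCluster,
		v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{
			clusterv1beta1.ReadyCondition,
			infrav1.NetworkInfrastructureReadyCondition,
		}},
	)
}
```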
-func (s *ClusterScope) UpdatePutStatus(condition clusterv1.ConditionType, service string, err error) { +func (s *ClusterScope) UpdatePutStatus(condition clusterv1beta1.ConditionType, service string, err error) { switch { case err == nil: - conditions.MarkTrue(s.AzureCluster, condition) + v1beta1conditions.MarkTrue(s.AzureCluster, condition) case azure.IsOperationNotDoneError(err): - conditions.MarkFalse(s.AzureCluster, condition, infrav1.CreatingReason, clusterv1.ConditionSeverityInfo, "%s creating or updating", service) + v1beta1conditions.MarkFalse(s.AzureCluster, condition, infrav1.CreatingReason, clusterv1beta1.ConditionSeverityInfo, "%s creating or updating", service) default: - conditions.MarkFalse(s.AzureCluster, condition, infrav1.FailedReason, clusterv1.ConditionSeverityError, "%s failed to create or update. err: %s", service, err.Error()) + v1beta1conditions.MarkFalse(s.AzureCluster, condition, infrav1.FailedReason, clusterv1beta1.ConditionSeverityError, "%s failed to create or update. err: %s", service, err.Error()) } } // UpdatePatchStatus updates a condition on the AzureCluster status after a PATCH operation. -func (s *ClusterScope) UpdatePatchStatus(condition clusterv1.ConditionType, service string, err error) { +func (s *ClusterScope) UpdatePatchStatus(condition clusterv1beta1.ConditionType, service string, err error) { switch { case err == nil: - conditions.MarkTrue(s.AzureCluster, condition) + v1beta1conditions.MarkTrue(s.AzureCluster, condition) case azure.IsOperationNotDoneError(err): - conditions.MarkFalse(s.AzureCluster, condition, infrav1.UpdatingReason, clusterv1.ConditionSeverityInfo, "%s updating", service) + v1beta1conditions.MarkFalse(s.AzureCluster, condition, infrav1.UpdatingReason, clusterv1beta1.ConditionSeverityInfo, "%s updating", service) default: - conditions.MarkFalse(s.AzureCluster, condition, infrav1.FailedReason, clusterv1.ConditionSeverityError, "%s failed to update. err: %s", service, err.Error()) + v1beta1conditions.MarkFalse(s.AzureCluster, condition, infrav1.FailedReason, clusterv1beta1.ConditionSeverityError, "%s failed to update. 
err: %s", service, err.Error()) } } diff --git a/azure/scope/cluster_test.go b/azure/scope/cluster_test.go index 1c404f27bb0..e9022e7838e 100644 --- a/azure/scope/cluster_test.go +++ b/azure/scope/cluster_test.go @@ -33,7 +33,7 @@ import ( "k8s.io/component-base/featuregate" featuregatetesting "k8s.io/component-base/featuregate/testing" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client/fake" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" @@ -78,11 +78,11 @@ func TestNewClusterScope(t *testing.T) { g := NewWithT(t) scheme := runtime.NewScheme() - _ = clusterv1.AddToScheme(scheme) + _ = clusterv1beta1.AddToScheme(scheme) _ = infrav1.AddToScheme(scheme) _ = corev1.AddToScheme(scheme) - cluster := &clusterv1.Cluster{ + cluster := &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", Namespace: "default", @@ -271,7 +271,7 @@ func TestAPIServerHost(t *testing.T) { for _, tc := range tests { g := NewWithT(t) - cluster := &clusterv1.Cluster{ + cluster := &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", Namespace: "default", @@ -296,13 +296,13 @@ func TestAPIServerHost(t *testing.T) { func TestGettingSecurityRules(t *testing.T) { tests := []struct { name string - cluster *clusterv1.Cluster + cluster *clusterv1beta1.Cluster azureCluster *infrav1.AzureCluster expectedRuleCount int }{ { name: "default control plane subnet with no rules should have 2 security rules defaulted", - cluster: &clusterv1.Cluster{ + cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", Namespace: "default", @@ -336,7 +336,7 @@ func TestGettingSecurityRules(t *testing.T) { }, { name: "additional rules are preserved", - cluster: &clusterv1.Cluster{ + cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", Namespace: "default", @@ -386,7 +386,7 @@ func TestGettingSecurityRules(t *testing.T) { }, { name: "override rules are accepted", - cluster: &clusterv1.Cluster{ + cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", Namespace: "default", @@ -469,7 +469,7 @@ func TestPublicIPSpecs(t *testing.T) { Name: "my-cluster", }, Status: infrav1.AzureClusterStatus{ - FailureDomains: map[string]clusterv1.FailureDomainSpec{ + FailureDomains: map[string]clusterv1beta1.FailureDomainSpec{ "failure-domain-id-1": {}, "failure-domain-id-2": {}, "failure-domain-id-3": {}, @@ -502,7 +502,7 @@ func TestPublicIPSpecs(t *testing.T) { Name: "my-cluster", }, Status: infrav1.AzureClusterStatus{ - FailureDomains: map[string]clusterv1.FailureDomainSpec{ + FailureDomains: map[string]clusterv1beta1.FailureDomainSpec{ "failure-domain-id-1": {}, "failure-domain-id-2": {}, "failure-domain-id-3": {}, @@ -539,7 +539,7 @@ func TestPublicIPSpecs(t *testing.T) { Name: "my-cluster", }, Status: infrav1.AzureClusterStatus{ - FailureDomains: map[string]clusterv1.FailureDomainSpec{ + FailureDomains: map[string]clusterv1beta1.FailureDomainSpec{ "failure-domain-id-1": {}, "failure-domain-id-2": {}, "failure-domain-id-3": {}, @@ -599,7 +599,7 @@ func TestPublicIPSpecs(t *testing.T) { Name: "my-cluster", }, Status: infrav1.AzureClusterStatus{ - FailureDomains: map[string]clusterv1.FailureDomainSpec{ + FailureDomains: map[string]clusterv1beta1.FailureDomainSpec{ "failure-domain-id-1": {}, "failure-domain-id-2": {}, "failure-domain-id-3": {}, @@ -697,7 +697,7 @@ func TestPublicIPSpecs(t *testing.T) { 
Name: "my-cluster", }, Status: infrav1.AzureClusterStatus{ - FailureDomains: map[string]clusterv1.FailureDomainSpec{ + FailureDomains: map[string]clusterv1beta1.FailureDomainSpec{ "failure-domain-id-1": {}, "failure-domain-id-2": {}, "failure-domain-id-3": {}, @@ -754,7 +754,7 @@ func TestPublicIPSpecs(t *testing.T) { Name: "my-cluster", }, Status: infrav1.AzureClusterStatus{ - FailureDomains: map[string]clusterv1.FailureDomainSpec{ + FailureDomains: map[string]clusterv1beta1.FailureDomainSpec{ "failure-domain-id-1": {}, "failure-domain-id-2": {}, "failure-domain-id-3": {}, @@ -814,7 +814,7 @@ func TestPublicIPSpecs(t *testing.T) { Name: "my-cluster", }, Status: infrav1.AzureClusterStatus{ - FailureDomains: map[string]clusterv1.FailureDomainSpec{ + FailureDomains: map[string]clusterv1beta1.FailureDomainSpec{ "failure-domain-id-1": {}, "failure-domain-id-2": {}, "failure-domain-id-3": {}, @@ -905,7 +905,7 @@ func TestPublicIPSpecs(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - cluster := &clusterv1.Cluster{ + cluster := &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: tc.azureCluster.Name, Namespace: "default", @@ -946,7 +946,7 @@ func TestRouteTableSpecs(t *testing.T) { { name: "returns specified route tables if present", clusterScope: ClusterScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", }, @@ -1037,7 +1037,7 @@ func TestNatGatewaySpecs(t *testing.T) { { name: "returns specified node NAT gateway if present", clusterScope: ClusterScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", }, @@ -1107,7 +1107,7 @@ func TestNatGatewaySpecs(t *testing.T) { { name: "returns specified node NAT gateway if present and ignores duplicate", clusterScope: ClusterScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", }, @@ -1195,7 +1195,7 @@ func TestNatGatewaySpecs(t *testing.T) { { name: "returns specified node NAT gateway if present and ignores control plane nat gateway", clusterScope: ClusterScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", }, @@ -1317,7 +1317,7 @@ func TestSetNatGatewayIDInSubnets(t *testing.T) { { name: "sets nat gateway id in the matching subnet", clusterScope: ClusterScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", }, @@ -1401,7 +1401,7 @@ func TestNSGSpecs(t *testing.T) { { name: "returns specified security groups if present", clusterScope: ClusterScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", }, @@ -1491,7 +1491,7 @@ func TestSubnetSpecs(t *testing.T) { { name: "returns specified subnet spec", clusterScope: ClusterScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", }, @@ -1568,7 +1568,7 @@ func TestSubnetSpecs(t *testing.T) { { name: "returns specified subnet spec and bastion spec if enabled", clusterScope: ClusterScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", }, @@ -1722,7 +1722,7 @@ func TestIsVnetManaged(t *testing.T) { { name: "Wrong tags", clusterScope: ClusterScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ 
ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", }, @@ -1753,7 +1753,7 @@ func TestIsVnetManaged(t *testing.T) { { name: "Has owning tags", clusterScope: ClusterScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", }, @@ -1860,7 +1860,7 @@ func TestAzureBastionSpec(t *testing.T) { { name: "returns bastion spec if enabled", clusterScope: ClusterScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", }, @@ -2129,7 +2129,7 @@ func TestGetPrivateDNSZoneName(t *testing.T) { t.Run(tc.clusterName, func(t *testing.T) { g := NewWithT(t) - cluster := &clusterv1.Cluster{ + cluster := &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: tc.clusterName, Namespace: "default", @@ -2379,7 +2379,7 @@ func TestBackendPoolName(t *testing.T) { featuregatetesting.SetFeatureGateDuringTest(t, feature.Gates, tc.featureGate, true) } - cluster := &clusterv1.Cluster{ + cluster := &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: tc.clusterName, Namespace: "default", @@ -2582,7 +2582,7 @@ func TestGenerateFQDN(t *testing.T) { t.Run(tc.clusterName, func(t *testing.T) { g := NewWithT(t) - cluster := &clusterv1.Cluster{ + cluster := &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: tc.clusterName, Namespace: "default", @@ -2677,7 +2677,7 @@ func TestAPIServerPort(t *testing.T) { tests := []struct { name string clusterName string - clusterNetowrk *clusterv1.ClusterNetwork + clusterNetowrk *clusterv1beta1.ClusterNetwork expectAPIServerPort int32 }{ { @@ -2689,13 +2689,13 @@ func TestAPIServerPort(t *testing.T) { { name: "Non nil cluster network but nil apiserverport", clusterName: "my-cluster", - clusterNetowrk: &clusterv1.ClusterNetwork{}, + clusterNetowrk: &clusterv1beta1.ClusterNetwork{}, expectAPIServerPort: 6443, }, { name: "Non nil cluster network and non nil apiserverport", clusterName: "my-cluster", - clusterNetowrk: &clusterv1.ClusterNetwork{ + clusterNetowrk: &clusterv1beta1.ClusterNetwork{ APIServerPort: ptr.To[int32](7000), }, expectAPIServerPort: 7000, @@ -2705,12 +2705,12 @@ func TestAPIServerPort(t *testing.T) { t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) - cluster := &clusterv1.Cluster{ + cluster := &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: tc.clusterName, Namespace: "default", }, - Spec: clusterv1.ClusterSpec{ + Spec: clusterv1beta1.ClusterSpec{ ClusterNetwork: tc.clusterNetowrk, }, } @@ -2748,7 +2748,7 @@ func TestFailureDomains(t *testing.T) { expectFailureDomains: []*string{ptr.To("failure-domain-id")}, clusterName: "my-cluster", azureClusterStatus: infrav1.AzureClusterStatus{ - FailureDomains: map[string]clusterv1.FailureDomainSpec{ + FailureDomains: map[string]clusterv1beta1.FailureDomainSpec{ "failure-domain-id": {}, }, }, @@ -2758,7 +2758,7 @@ func TestFailureDomains(t *testing.T) { expectFailureDomains: []*string{ptr.To("failure-domain-id-1"), ptr.To("failure-domain-id-2"), ptr.To("failure-domain-id-3")}, clusterName: "my-cluster", azureClusterStatus: infrav1.AzureClusterStatus{ - FailureDomains: map[string]clusterv1.FailureDomainSpec{ + FailureDomains: map[string]clusterv1beta1.FailureDomainSpec{ "failure-domain-id-1": {}, "failure-domain-id-2": {}, "failure-domain-id-3": {}, @@ -3310,7 +3310,7 @@ func TestClusterScope_LBSpecs(t *testing.T) { if tc.featureGate == feature.APIServerILB { featuregatetesting.SetFeatureGateDuringTest(t, feature.Gates, tc.featureGate, true) } - cluster := 
&clusterv1.Cluster{ + cluster := &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: tc.azureCluster.Name, Namespace: "default", @@ -3616,7 +3616,7 @@ func TestVNetPeerings(t *testing.T) { clusterName := "my-cluster" clusterNamespace := "default" - cluster := &clusterv1.Cluster{ + cluster := &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: clusterName, Namespace: clusterNamespace, @@ -3691,7 +3691,7 @@ func TestPrivateEndpointSpecs(t *testing.T) { { name: "returns list of private endpoint specs if private endpoints are specified", clusterScope: ClusterScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", Namespace: "dummy-ns", @@ -3891,37 +3891,37 @@ func TestSetFailureDomain(t *testing.T) { t.Parallel() cases := map[string]struct { - discoveredFDs clusterv1.FailureDomains - specifiedFDs clusterv1.FailureDomains - expectedFDs clusterv1.FailureDomains + discoveredFDs clusterv1beta1.FailureDomains + specifiedFDs clusterv1beta1.FailureDomains + expectedFDs clusterv1beta1.FailureDomains }{ "no failure domains specified": { - discoveredFDs: clusterv1.FailureDomains{ - "fd1": clusterv1.FailureDomainSpec{ControlPlane: true}, - "fd2": clusterv1.FailureDomainSpec{ControlPlane: false}, + discoveredFDs: clusterv1beta1.FailureDomains{ + "fd1": clusterv1beta1.FailureDomainSpec{ControlPlane: true}, + "fd2": clusterv1beta1.FailureDomainSpec{ControlPlane: false}, }, - expectedFDs: clusterv1.FailureDomains{ - "fd1": clusterv1.FailureDomainSpec{ControlPlane: true}, - "fd2": clusterv1.FailureDomainSpec{ControlPlane: false}, + expectedFDs: clusterv1beta1.FailureDomains{ + "fd1": clusterv1beta1.FailureDomainSpec{ControlPlane: true}, + "fd2": clusterv1beta1.FailureDomainSpec{ControlPlane: false}, }, }, "no failure domains discovered": { - specifiedFDs: clusterv1.FailureDomains{"fd1": clusterv1.FailureDomainSpec{ControlPlane: true}}, + specifiedFDs: clusterv1beta1.FailureDomains{"fd1": clusterv1beta1.FailureDomainSpec{ControlPlane: true}}, }, "failure domain specified without intersection": { - discoveredFDs: clusterv1.FailureDomains{"fd1": clusterv1.FailureDomainSpec{ControlPlane: true}}, - specifiedFDs: clusterv1.FailureDomains{"fd2": clusterv1.FailureDomainSpec{ControlPlane: false}}, - expectedFDs: clusterv1.FailureDomains{"fd1": clusterv1.FailureDomainSpec{ControlPlane: true}}, + discoveredFDs: clusterv1beta1.FailureDomains{"fd1": clusterv1beta1.FailureDomainSpec{ControlPlane: true}}, + specifiedFDs: clusterv1beta1.FailureDomains{"fd2": clusterv1beta1.FailureDomainSpec{ControlPlane: false}}, + expectedFDs: clusterv1beta1.FailureDomains{"fd1": clusterv1beta1.FailureDomainSpec{ControlPlane: true}}, }, "failure domain override to false succeeds": { - discoveredFDs: clusterv1.FailureDomains{"fd1": clusterv1.FailureDomainSpec{ControlPlane: true}}, - specifiedFDs: clusterv1.FailureDomains{"fd1": clusterv1.FailureDomainSpec{ControlPlane: false}}, - expectedFDs: clusterv1.FailureDomains{"fd1": clusterv1.FailureDomainSpec{ControlPlane: false}}, + discoveredFDs: clusterv1beta1.FailureDomains{"fd1": clusterv1beta1.FailureDomainSpec{ControlPlane: true}}, + specifiedFDs: clusterv1beta1.FailureDomains{"fd1": clusterv1beta1.FailureDomainSpec{ControlPlane: false}}, + expectedFDs: clusterv1beta1.FailureDomains{"fd1": clusterv1beta1.FailureDomainSpec{ControlPlane: false}}, }, "failure domain override to true fails": { - discoveredFDs: clusterv1.FailureDomains{"fd1": clusterv1.FailureDomainSpec{ControlPlane: false}}, - specifiedFDs: 
clusterv1.FailureDomains{"fd1": clusterv1.FailureDomainSpec{ControlPlane: true}}, - expectedFDs: clusterv1.FailureDomains{"fd1": clusterv1.FailureDomainSpec{ControlPlane: false}}, + discoveredFDs: clusterv1beta1.FailureDomains{"fd1": clusterv1beta1.FailureDomainSpec{ControlPlane: false}}, + specifiedFDs: clusterv1beta1.FailureDomains{"fd1": clusterv1beta1.FailureDomainSpec{ControlPlane: true}}, + expectedFDs: clusterv1beta1.FailureDomains{"fd1": clusterv1beta1.FailureDomainSpec{ControlPlane: false}}, }, } @@ -3968,7 +3968,7 @@ func TestGroupSpecs(t *testing.T) { { name: "virtualNetwork belongs to a different resource group", input: ClusterScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", }, @@ -4004,7 +4004,7 @@ func TestGroupSpecs(t *testing.T) { { name: "virtualNetwork belongs to a same resource group", input: ClusterScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", }, @@ -4033,7 +4033,7 @@ func TestGroupSpecs(t *testing.T) { { name: "virtualNetwork resource group not specified", input: ClusterScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: "default", @@ -4063,7 +4063,7 @@ func TestGroupSpecs(t *testing.T) { { name: "virtualNetwork belongs to different resource group with non-k8s name", input: ClusterScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: "default", @@ -4223,7 +4223,7 @@ func TestPrivateDNSSpec(t *testing.T) { t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) - cluster := &clusterv1.Cluster{ + cluster := &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: tc.clusterName, Namespace: "default", diff --git a/azure/scope/machine.go b/azure/scope/machine.go index 141a4084770..12cedf58e07 100644 --- a/azure/scope/machine.go +++ b/azure/scope/machine.go @@ -28,10 +28,9 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" - "sigs.k8s.io/cluster-api/util/patch" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" @@ -50,13 +49,14 @@ import ( azureutil "sigs.k8s.io/cluster-api-provider-azure/util/azure" "sigs.k8s.io/cluster-api-provider-azure/util/futures" "sigs.k8s.io/cluster-api-provider-azure/util/tele" + clusterv1beta1util "sigs.k8s.io/cluster-api-provider-azure/util/v1beta1" ) // MachineScopeParams defines the input parameters used to create a new MachineScope. 
type MachineScopeParams struct { Client client.Client ClusterScope azure.ClusterScoper - Machine *clusterv1.Machine + Machine *clusterv1beta1.Machine AzureMachine *infrav1.AzureMachine Cache *MachineCache SKUCache SKUCacher @@ -75,7 +75,7 @@ func NewMachineScope(params MachineScopeParams) (*MachineScope, error) { return nil, errors.New("azure machine is required when creating a MachineScope") } - helper, err := patch.NewHelper(params.AzureMachine, params.Client) + helper, err := v1beta1patch.NewHelper(params.AzureMachine, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init patch helper") } @@ -94,10 +94,10 @@ func NewMachineScope(params MachineScopeParams) (*MachineScope, error) { // MachineScope defines a scope defined around a machine and its cluster. type MachineScope struct { client client.Client - patchHelper *patch.Helper + patchHelper *v1beta1patch.Helper azure.ClusterScoper - Machine *clusterv1.Machine + Machine *clusterv1beta1.Machine AzureMachine *infrav1.AzureMachine cache *MachineCache skuCache SKUCacher @@ -464,12 +464,12 @@ func (m *MachineScope) Namespace() string { // IsControlPlane returns true if the machine is a control plane. func (m *MachineScope) IsControlPlane() bool { - return util.IsControlPlaneMachine(m.Machine) + return clusterv1beta1util.IsControlPlaneMachine(m.Machine) } // Role returns the machine role from the labels. func (m *MachineScope) Role() string { - if util.IsControlPlaneMachine(m.Machine) { + if clusterv1beta1util.IsControlPlaneMachine(m.Machine) { return infrav1.ControlPlane } return infrav1.Node @@ -526,12 +526,12 @@ func (m *MachineScope) AvailabilitySet() (string, bool) { } // get machine deployment name from labels for machines that maybe part of a machine deployment. - if mdName, ok := m.Machine.Labels[clusterv1.MachineDeploymentNameLabel]; ok { + if mdName, ok := m.Machine.Labels[clusterv1beta1.MachineDeploymentNameLabel]; ok { return azure.GenerateAvailabilitySetName(m.ClusterName(), mdName), true } // if machine deployment name label is not available, use machine set name. - if msName, ok := m.Machine.Labels[clusterv1.MachineSetNameLabel]; ok { + if msName, ok := m.Machine.Labels[clusterv1beta1.MachineSetNameLabel]; ok { return azure.GenerateAvailabilitySetName(m.ClusterName(), msName), true } @@ -610,8 +610,8 @@ func (m *MachineScope) SetFailureReason(v string) { } // SetConditionFalse sets the specified AzureMachine condition to false. -func (m *MachineScope) SetConditionFalse(conditionType clusterv1.ConditionType, reason string, severity clusterv1.ConditionSeverity, message string) { - conditions.MarkFalse(m.AzureMachine, conditionType, reason, severity, "%s", message) +func (m *MachineScope) SetConditionFalse(conditionType clusterv1beta1.ConditionType, reason string, severity clusterv1beta1.ConditionSeverity, message string) { + v1beta1conditions.MarkFalse(m.AzureMachine, conditionType, reason, severity, "%s", message) } // SetAnnotation sets a key value annotation on the AzureMachine. @@ -656,13 +656,13 @@ func (m *MachineScope) SetAddresses(addrs []corev1.NodeAddress) { // PatchObject persists the machine spec and status. 
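The AvailabilitySet hunk above keeps its label precedence intact through the rename: the owning MachineDeployment name wins, the MachineSet name is the fallback, and a machine with neither label gets no availability set. Condensed for reference:

```go
// Condensed from the hunk above; clusterName and labels stand in for
// the scope's receiver state.
func availabilitySetName(clusterName string, labels map[string]string) (string, bool) {
	// Machines that are part of a MachineDeployment take its name.
	if mdName, ok := labels[clusterv1beta1.MachineDeploymentNameLabel]; ok {
		return azure.GenerateAvailabilitySetName(clusterName, mdName), true
	}
	// Otherwise fall back to the owning MachineSet, if any.
	if msName, ok := labels[clusterv1beta1.MachineSetNameLabel]; ok {
		return azure.GenerateAvailabilitySetName(clusterName, msName), true
	}
	return "", false
}
```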
func (m *MachineScope) PatchObject(ctx context.Context) error { - conditions.SetSummary(m.AzureMachine) + v1beta1conditions.SetSummary(m.AzureMachine) return m.patchHelper.Patch( ctx, m.AzureMachine, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ - clusterv1.ReadyCondition, + v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ + clusterv1beta1.ReadyCondition, infrav1.VMRunningCondition, infrav1.AvailabilitySetReadyCondition, infrav1.NetworkInterfaceReadyCondition, @@ -786,38 +786,38 @@ func (m *MachineScope) DeleteLongRunningOperationState(name, service, futureType } // UpdateDeleteStatus updates a condition on the AzureMachine status after a DELETE operation. -func (m *MachineScope) UpdateDeleteStatus(condition clusterv1.ConditionType, service string, err error) { +func (m *MachineScope) UpdateDeleteStatus(condition clusterv1beta1.ConditionType, service string, err error) { switch { case err == nil: - conditions.MarkFalse(m.AzureMachine, condition, infrav1.DeletedReason, clusterv1.ConditionSeverityInfo, "%s successfully deleted", service) + v1beta1conditions.MarkFalse(m.AzureMachine, condition, infrav1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "%s successfully deleted", service) case azure.IsOperationNotDoneError(err): - conditions.MarkFalse(m.AzureMachine, condition, infrav1.DeletingReason, clusterv1.ConditionSeverityInfo, "%s deleting", service) + v1beta1conditions.MarkFalse(m.AzureMachine, condition, infrav1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "%s deleting", service) default: - conditions.MarkFalse(m.AzureMachine, condition, infrav1.DeletionFailedReason, clusterv1.ConditionSeverityError, "%s failed to delete. err: %s", service, err.Error()) + v1beta1conditions.MarkFalse(m.AzureMachine, condition, infrav1.DeletionFailedReason, clusterv1beta1.ConditionSeverityError, "%s failed to delete. err: %s", service, err.Error()) } } // UpdatePutStatus updates a condition on the AzureMachine status after a PUT operation. -func (m *MachineScope) UpdatePutStatus(condition clusterv1.ConditionType, service string, err error) { +func (m *MachineScope) UpdatePutStatus(condition clusterv1beta1.ConditionType, service string, err error) { switch { case err == nil: - conditions.MarkTrue(m.AzureMachine, condition) + v1beta1conditions.MarkTrue(m.AzureMachine, condition) case azure.IsOperationNotDoneError(err): - conditions.MarkFalse(m.AzureMachine, condition, infrav1.CreatingReason, clusterv1.ConditionSeverityInfo, "%s creating or updating", service) + v1beta1conditions.MarkFalse(m.AzureMachine, condition, infrav1.CreatingReason, clusterv1beta1.ConditionSeverityInfo, "%s creating or updating", service) default: - conditions.MarkFalse(m.AzureMachine, condition, infrav1.FailedReason, clusterv1.ConditionSeverityError, "%s failed to create or update. err: %s", service, err.Error()) + v1beta1conditions.MarkFalse(m.AzureMachine, condition, infrav1.FailedReason, clusterv1beta1.ConditionSeverityError, "%s failed to create or update. err: %s", service, err.Error()) } } // UpdatePatchStatus updates a condition on the AzureMachine status after a PATCH operation. 
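The Update{Delete,Put,Patch}Status methods above and immediately below repeat with the same shape on ClusterScope, MachineScope, and, further down, MachinePoolScope; only the patched object and the reason constants differ per verb. A generic sketch of that shared shape, assuming the deprecated conditions package keeps the Setter interface of the old util/conditions:

```go
// Sketch only: err == nil means the operation finished, an
// operation-not-done error means it is still converging (Info
// severity), and anything else is a hard failure (Error severity).
func updatePutStatus(obj v1beta1conditions.Setter, cond clusterv1beta1.ConditionType, service string, err error) {
	switch {
	case err == nil:
		v1beta1conditions.MarkTrue(obj, cond)
	case azure.IsOperationNotDoneError(err):
		v1beta1conditions.MarkFalse(obj, cond, infrav1.CreatingReason, clusterv1beta1.ConditionSeverityInfo, "%s creating or updating", service)
	default:
		v1beta1conditions.MarkFalse(obj, cond, infrav1.FailedReason, clusterv1beta1.ConditionSeverityError, "%s failed to create or update. err: %s", service, err.Error())
	}
}
```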
-func (m *MachineScope) UpdatePatchStatus(condition clusterv1.ConditionType, service string, err error) { +func (m *MachineScope) UpdatePatchStatus(condition clusterv1beta1.ConditionType, service string, err error) { switch { case err == nil: - conditions.MarkTrue(m.AzureMachine, condition) + v1beta1conditions.MarkTrue(m.AzureMachine, condition) case azure.IsOperationNotDoneError(err): - conditions.MarkFalse(m.AzureMachine, condition, infrav1.UpdatingReason, clusterv1.ConditionSeverityInfo, "%s updating", service) + v1beta1conditions.MarkFalse(m.AzureMachine, condition, infrav1.UpdatingReason, clusterv1beta1.ConditionSeverityInfo, "%s updating", service) default: - conditions.MarkFalse(m.AzureMachine, condition, infrav1.FailedReason, clusterv1.ConditionSeverityError, "%s failed to update. err: %s", service, err.Error()) + v1beta1conditions.MarkFalse(m.AzureMachine, condition, infrav1.FailedReason, clusterv1beta1.ConditionSeverityError, "%s failed to update. err: %s", service, err.Error()) } } diff --git a/azure/scope/machine_test.go b/azure/scope/machine_test.go index 89b2f7b6744..81afaf91da0 100644 --- a/azure/scope/machine_test.go +++ b/azure/scope/machine_test.go @@ -30,7 +30,7 @@ import ( "k8s.io/component-base/featuregate" featuregatetesting "k8s.io/component-base/featuregate/testing" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -90,7 +90,7 @@ func TestMachineScope_Name(t *testing.T) { name: "Windows name with long MachineName and short cluster name", machineScope: MachineScope{ ClusterScoper: &ClusterScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", }, @@ -116,7 +116,7 @@ func TestMachineScope_Name(t *testing.T) { name: "Windows name with long MachineName and long cluster name", machineScope: MachineScope{ ClusterScoper: &ClusterScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster8901234", }, @@ -275,7 +275,7 @@ func TestMachineScope_PublicIPSpecs(t *testing.T) { }, }, ClusterScoper: &ClusterScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", // Note: m.ClusterName() takes the value from the Cluster object, not the AzureCluster object @@ -286,7 +286,7 @@ func TestMachineScope_PublicIPSpecs(t *testing.T) { Name: "my-cluster", }, Status: infrav1.AzureClusterStatus{ - FailureDomains: map[string]clusterv1.FailureDomainSpec{ + FailureDomains: map[string]clusterv1beta1.FailureDomainSpec{ "failure-domain-id-1": {}, "failure-domain-id-2": {}, "failure-domain-id-3": {}, @@ -348,7 +348,7 @@ func TestMachineScope_InboundNatSpecs(t *testing.T) { { name: "returns empty when infra is not control plane", machineScope: MachineScope{ - Machine: &clusterv1.Machine{}, + Machine: &clusterv1beta1.Machine{}, AzureMachine: &infrav1.AzureMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", @@ -360,10 +360,10 @@ func TestMachineScope_InboundNatSpecs(t *testing.T) { { name: "returns InboundNatSpec when infra is control plane", machineScope: MachineScope{ - Machine: &clusterv1.Machine{ + Machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - clusterv1.MachineControlPlaneLabel: "", + clusterv1beta1.MachineControlPlaneLabel: "", }, }, }, @@ -425,7 +425,7 @@ func 
TestMachineScope_RoleAssignmentSpecs(t *testing.T) { { name: "returns empty if VM identity is not system assigned", machineScope: MachineScope{ - Machine: &clusterv1.Machine{}, + Machine: &clusterv1beta1.Machine{}, AzureMachine: &infrav1.AzureMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", @@ -437,7 +437,7 @@ func TestMachineScope_RoleAssignmentSpecs(t *testing.T) { { name: "returns RoleAssignmentSpec if VM identity is system assigned", machineScope: MachineScope{ - Machine: &clusterv1.Machine{}, + Machine: &clusterv1beta1.Machine{}, AzureMachine: &infrav1.AzureMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", @@ -477,7 +477,7 @@ func TestMachineScope_RoleAssignmentSpecs(t *testing.T) { { name: "returns RoleAssignmentSpec with specified scope and role assignment id", machineScope: MachineScope{ - Machine: &clusterv1.Machine{}, + Machine: &clusterv1beta1.Machine{}, AzureMachine: &infrav1.AzureMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", @@ -537,7 +537,7 @@ func TestMachineScope_VMExtensionSpecs(t *testing.T) { { name: "If OS type is Linux and cloud is AzurePublicCloud, it returns ExtensionSpec", machineScope: MachineScope{ - Machine: &clusterv1.Machine{}, + Machine: &clusterv1beta1.Machine{}, AzureMachine: &infrav1.AzureMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", @@ -584,7 +584,7 @@ func TestMachineScope_VMExtensionSpecs(t *testing.T) { { name: "If OS type is Linux and cloud is AzurePublicCloud and DisableExtensionOperations is true, it returns empty", machineScope: MachineScope{ - Machine: &clusterv1.Machine{}, + Machine: &clusterv1beta1.Machine{}, AzureMachine: &infrav1.AzureMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", @@ -618,7 +618,7 @@ func TestMachineScope_VMExtensionSpecs(t *testing.T) { { name: "If OS type is Linux and cloud is not AzurePublicCloud, it returns empty", machineScope: MachineScope{ - Machine: &clusterv1.Machine{}, + Machine: &clusterv1beta1.Machine{}, AzureMachine: &infrav1.AzureMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", @@ -651,7 +651,7 @@ func TestMachineScope_VMExtensionSpecs(t *testing.T) { { name: "If OS type is Windows and cloud is AzurePublicCloud, it returns ExtensionSpec", machineScope: MachineScope{ - Machine: &clusterv1.Machine{}, + Machine: &clusterv1beta1.Machine{}, AzureMachine: &infrav1.AzureMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", @@ -698,7 +698,7 @@ func TestMachineScope_VMExtensionSpecs(t *testing.T) { { name: "If OS type is Windows and cloud is not AzurePublicCloud, it returns empty", machineScope: MachineScope{ - Machine: &clusterv1.Machine{}, + Machine: &clusterv1beta1.Machine{}, AzureMachine: &infrav1.AzureMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", @@ -731,7 +731,7 @@ func TestMachineScope_VMExtensionSpecs(t *testing.T) { { name: "If OS type is not Linux or Windows and cloud is AzurePublicCloud, it returns empty", machineScope: MachineScope{ - Machine: &clusterv1.Machine{}, + Machine: &clusterv1beta1.Machine{}, AzureMachine: &infrav1.AzureMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", @@ -764,7 +764,7 @@ func TestMachineScope_VMExtensionSpecs(t *testing.T) { { name: "If OS type is not Windows or Linux and cloud is not AzurePublicCloud, it returns empty", machineScope: MachineScope{ - Machine: &clusterv1.Machine{}, + Machine: &clusterv1beta1.Machine{}, AzureMachine: &infrav1.AzureMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", @@ -797,7 +797,7 @@ func 
TestMachineScope_VMExtensionSpecs(t *testing.T) { { name: "If a custom VM extension is specified, it returns the custom VM extension", machineScope: MachineScope{ - Machine: &clusterv1.Machine{}, + Machine: &clusterv1beta1.Machine{}, AzureMachine: &infrav1.AzureMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", @@ -873,7 +873,7 @@ func TestMachineScope_VMExtensionSpecs(t *testing.T) { { name: "If a custom VM extension is specified and bootstrap extension is disabled, it returns only the custom VM extension", machineScope: MachineScope{ - Machine: &clusterv1.Machine{}, + Machine: &clusterv1beta1.Machine{}, AzureMachine: &infrav1.AzureMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", @@ -1053,8 +1053,8 @@ func TestMachineScope_AvailabilityZone(t *testing.T) { { name: "returns empty if no failure domain is present", machineScope: MachineScope{ - Machine: &clusterv1.Machine{ - Spec: clusterv1.MachineSpec{}, + Machine: &clusterv1beta1.Machine{ + Spec: clusterv1beta1.MachineSpec{}, }, AzureMachine: &infrav1.AzureMachine{ ObjectMeta: metav1.ObjectMeta{ @@ -1068,8 +1068,8 @@ func TestMachineScope_AvailabilityZone(t *testing.T) { { name: "returns failure domain from the machine spec", machineScope: MachineScope{ - Machine: &clusterv1.Machine{ - Spec: clusterv1.MachineSpec{ + Machine: &clusterv1beta1.Machine{ + Spec: clusterv1beta1.MachineSpec{ FailureDomain: ptr.To("dummy-failure-domain-from-machine-spec"), }, }, @@ -1087,8 +1087,8 @@ func TestMachineScope_AvailabilityZone(t *testing.T) { { name: "returns failure domain from the azuremachine spec", machineScope: MachineScope{ - Machine: &clusterv1.Machine{ - Spec: clusterv1.MachineSpec{}, + Machine: &clusterv1beta1.Machine{ + Spec: clusterv1beta1.MachineSpec{}, }, AzureMachine: &infrav1.AzureMachine{ ObjectMeta: metav1.ObjectMeta{ @@ -1159,7 +1159,7 @@ func TestMachineScope_IsControlPlane(t *testing.T) { { name: "returns false when machine is not control plane", machineScope: MachineScope{ - Machine: &clusterv1.Machine{}, + Machine: &clusterv1beta1.Machine{}, AzureMachine: &infrav1.AzureMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", @@ -1171,10 +1171,10 @@ func TestMachineScope_IsControlPlane(t *testing.T) { { name: "returns true when machine is control plane", machineScope: MachineScope{ - Machine: &clusterv1.Machine{ + Machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - clusterv1.MachineControlPlaneLabel: "", + clusterv1beta1.MachineControlPlaneLabel: "", }, }, }, @@ -1205,7 +1205,7 @@ func TestMachineScope_Role(t *testing.T) { { name: "returns node when machine is worker", machineScope: MachineScope{ - Machine: &clusterv1.Machine{}, + Machine: &clusterv1beta1.Machine{}, AzureMachine: &infrav1.AzureMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", @@ -1217,10 +1217,10 @@ func TestMachineScope_Role(t *testing.T) { { name: "returns control-plane when machine is control plane", machineScope: MachineScope{ - Machine: &clusterv1.Machine{ + Machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - clusterv1.MachineControlPlaneLabel: "", + clusterv1beta1.MachineControlPlaneLabel: "", }, }, }, @@ -1253,20 +1253,20 @@ func TestMachineScope_AvailabilitySet(t *testing.T) { name: "returns empty and false if availability set is not enabled", machineScope: MachineScope{ ClusterScoper: &ClusterScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", }, }, AzureCluster: 
&infrav1.AzureCluster{ Status: infrav1.AzureClusterStatus{ - FailureDomains: clusterv1.FailureDomains{ - "foo-failure-domain": clusterv1.FailureDomainSpec{}, + FailureDomains: clusterv1beta1.FailureDomains{ + "foo-failure-domain": clusterv1beta1.FailureDomainSpec{}, }, }, }, }, - Machine: &clusterv1.Machine{}, + Machine: &clusterv1beta1.Machine{}, AzureMachine: &infrav1.AzureMachine{}, }, wantAvailabilitySetName: "", @@ -1277,7 +1277,7 @@ func TestMachineScope_AvailabilitySet(t *testing.T) { machineScope: MachineScope{ ClusterScoper: &ClusterScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", }, @@ -1286,10 +1286,10 @@ func TestMachineScope_AvailabilitySet(t *testing.T) { Status: infrav1.AzureClusterStatus{}, }, }, - Machine: &clusterv1.Machine{ + Machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - clusterv1.MachineControlPlaneLabel: "", + clusterv1beta1.MachineControlPlaneLabel: "", }, }, }, @@ -1303,7 +1303,7 @@ func TestMachineScope_AvailabilitySet(t *testing.T) { machineScope: MachineScope{ ClusterScoper: &ClusterScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", }, @@ -1312,10 +1312,10 @@ func TestMachineScope_AvailabilitySet(t *testing.T) { Status: infrav1.AzureClusterStatus{}, }, }, - Machine: &clusterv1.Machine{ + Machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - clusterv1.MachineDeploymentNameLabel: "foo-machine-deployment", + clusterv1beta1.MachineDeploymentNameLabel: "foo-machine-deployment", }, }, }, @@ -1328,7 +1328,7 @@ func TestMachineScope_AvailabilitySet(t *testing.T) { name: "returns empty and false if machine is using spot instances", machineScope: MachineScope{ ClusterScoper: &ClusterScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", }, @@ -1337,10 +1337,10 @@ func TestMachineScope_AvailabilitySet(t *testing.T) { Status: infrav1.AzureClusterStatus{}, }, }, - Machine: &clusterv1.Machine{ + Machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - clusterv1.MachineDeploymentNameLabel: "foo-machine-deployment", + clusterv1beta1.MachineDeploymentNameLabel: "foo-machine-deployment", }, }, }, @@ -1358,7 +1358,7 @@ func TestMachineScope_AvailabilitySet(t *testing.T) { machineScope: MachineScope{ ClusterScoper: &ClusterScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", }, @@ -1367,10 +1367,10 @@ func TestMachineScope_AvailabilitySet(t *testing.T) { Status: infrav1.AzureClusterStatus{}, }, }, - Machine: &clusterv1.Machine{ + Machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - clusterv1.MachineSetNameLabel: "foo-machine-set", + clusterv1beta1.MachineSetNameLabel: "foo-machine-set", }, }, }, @@ -1384,7 +1384,7 @@ func TestMachineScope_AvailabilitySet(t *testing.T) { machineScope: MachineScope{ ClusterScoper: &ClusterScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", }, @@ -1393,11 +1393,11 @@ func TestMachineScope_AvailabilitySet(t *testing.T) { Status: infrav1.AzureClusterStatus{}, }, }, - Machine: &clusterv1.Machine{ + Machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - clusterv1.MachineDeploymentNameLabel: 
"foo-machine-deployment", - clusterv1.MachineSetNameLabel: "foo-machine-set", + clusterv1beta1.MachineDeploymentNameLabel: "foo-machine-deployment", + clusterv1beta1.MachineSetNameLabel: "foo-machine-set", }, }, }, @@ -1411,7 +1411,7 @@ func TestMachineScope_AvailabilitySet(t *testing.T) { machineScope: MachineScope{ ClusterScoper: &ClusterScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", }, @@ -1420,7 +1420,7 @@ func TestMachineScope_AvailabilitySet(t *testing.T) { Status: infrav1.AzureClusterStatus{}, }, }, - Machine: &clusterv1.Machine{ + Machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{}, }, @@ -1434,7 +1434,7 @@ func TestMachineScope_AvailabilitySet(t *testing.T) { name: "returns empty and false if machine has failureDomain set", machineScope: MachineScope{ ClusterScoper: &ClusterScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", }, @@ -1443,13 +1443,13 @@ func TestMachineScope_AvailabilitySet(t *testing.T) { Status: infrav1.AzureClusterStatus{}, }, }, - Machine: &clusterv1.Machine{ + Machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - clusterv1.MachineDeploymentNameLabel: "foo-machine-deployment", + clusterv1beta1.MachineDeploymentNameLabel: "foo-machine-deployment", }, }, - Spec: clusterv1.MachineSpec{ + Spec: clusterv1beta1.MachineSpec{ FailureDomain: ptr.To("1"), }, }, @@ -1464,7 +1464,7 @@ func TestMachineScope_AvailabilitySet(t *testing.T) { name: "returns empty and false if azureMachine has failureDomain set", machineScope: MachineScope{ ClusterScoper: &ClusterScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", }, @@ -1473,10 +1473,10 @@ func TestMachineScope_AvailabilitySet(t *testing.T) { Status: infrav1.AzureClusterStatus{}, }, }, - Machine: &clusterv1.Machine{ + Machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - clusterv1.MachineDeploymentNameLabel: "foo-machine-deployment", + clusterv1beta1.MachineDeploymentNameLabel: "foo-machine-deployment", }, }, }, @@ -1587,11 +1587,11 @@ func TestMachineScope_GetVMImage(t *testing.T) { { name: "if no image is specified and os specified is windows, returns windows containerd image", machineScope: MachineScope{ - Machine: &clusterv1.Machine{ + Machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", }, - Spec: clusterv1.MachineSpec{ + Spec: clusterv1beta1.MachineSpec{ Version: ptr.To("1.20.1"), }, }, @@ -1616,11 +1616,11 @@ func TestMachineScope_GetVMImage(t *testing.T) { { name: "if no image is specified and os specified is windows with annotation dockershim, returns error", machineScope: MachineScope{ - Machine: &clusterv1.Machine{ + Machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", }, - Spec: clusterv1.MachineSpec{ + Spec: clusterv1beta1.MachineSpec{ Version: ptr.To("1.22.1"), }, }, @@ -1648,11 +1648,11 @@ func TestMachineScope_GetVMImage(t *testing.T) { { name: "if no image is specified and os specified is windows with windowsServerVersion annotation set to 2019, returns error", machineScope: MachineScope{ - Machine: &clusterv1.Machine{ + Machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", }, - Spec: clusterv1.MachineSpec{ + Spec: clusterv1beta1.MachineSpec{ Version: ptr.To("1.23.3"), }, }, @@ 
-1680,11 +1680,11 @@ func TestMachineScope_GetVMImage(t *testing.T) { { name: "if no image is specified and os specified is windows with windowsServerVersion annotation set to 2022, retrurns 2022 image", machineScope: MachineScope{ - Machine: &clusterv1.Machine{ + Machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", }, - Spec: clusterv1.MachineSpec{ + Spec: clusterv1beta1.MachineSpec{ Version: ptr.To("1.23.3"), }, }, @@ -1712,11 +1712,11 @@ func TestMachineScope_GetVMImage(t *testing.T) { { name: "if no image and OS is specified, returns linux image", machineScope: MachineScope{ - Machine: &clusterv1.Machine{ + Machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", }, - Spec: clusterv1.MachineSpec{ + Spec: clusterv1beta1.MachineSpec{ Version: ptr.To("1.20.1"), }, }, @@ -1761,7 +1761,7 @@ func TestMachineScope_NICSpecs(t *testing.T) { AzureClients: AzureClients{ subscriptionID: "123", }, - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", Namespace: "default", @@ -1819,11 +1819,11 @@ func TestMachineScope_NICSpecs(t *testing.T) { }}, }, }, - Machine: &clusterv1.Machine{ + Machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine", Labels: map[string]string{ - // clusterv1.MachineControlPlaneLabel: "true", + // clusterv1beta1.MachineControlPlaneLabel: "true", }, }, }, @@ -1864,7 +1864,7 @@ func TestMachineScope_NICSpecs(t *testing.T) { AzureClients: AzureClients{ subscriptionID: "123", }, - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", Namespace: "default", @@ -1922,11 +1922,11 @@ func TestMachineScope_NICSpecs(t *testing.T) { }}, }, }, - Machine: &clusterv1.Machine{ + Machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine", Labels: map[string]string{ - // clusterv1.MachineControlPlaneLabel: "true", + // clusterv1beta1.MachineControlPlaneLabel: "true", }, }, }, @@ -1974,7 +1974,7 @@ func TestMachineScope_NICSpecs(t *testing.T) { AzureClients: AzureClients{ subscriptionID: "123", }, - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", Namespace: "default", @@ -2034,11 +2034,11 @@ func TestMachineScope_NICSpecs(t *testing.T) { }}, }, }, - Machine: &clusterv1.Machine{ + Machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine", Labels: map[string]string{ - // clusterv1.MachineControlPlaneLabel: "true", + // clusterv1beta1.MachineControlPlaneLabel: "true", }, }, }, @@ -2079,7 +2079,7 @@ func TestMachineScope_NICSpecs(t *testing.T) { AzureClients: AzureClients{ subscriptionID: "123", }, - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", Namespace: "default", @@ -2135,11 +2135,11 @@ func TestMachineScope_NICSpecs(t *testing.T) { AllocatePublicIP: true, }, }, - Machine: &clusterv1.Machine{ + Machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine", Labels: map[string]string{ - // clusterv1.MachineControlPlaneLabel: "true", + // clusterv1beta1.MachineControlPlaneLabel: "true", }, }, }, @@ -2180,7 +2180,7 @@ func TestMachineScope_NICSpecs(t *testing.T) { AzureClients: AzureClients{ subscriptionID: "123", }, - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", Namespace: "default", @@ -2244,11 +2244,11 @@ func 
TestMachineScope_NICSpecs(t *testing.T) { }}, }, }, - Machine: &clusterv1.Machine{ + Machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine", Labels: map[string]string{ - clusterv1.MachineControlPlaneLabel: "true", + clusterv1beta1.MachineControlPlaneLabel: "true", }, }, }, @@ -2289,7 +2289,7 @@ func TestMachineScope_NICSpecs(t *testing.T) { AzureClients: AzureClients{ subscriptionID: "123", }, - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", Namespace: "default", @@ -2350,11 +2350,11 @@ func TestMachineScope_NICSpecs(t *testing.T) { }}, }, }, - Machine: &clusterv1.Machine{ + Machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine", Labels: map[string]string{ - clusterv1.MachineControlPlaneLabel: "true", + clusterv1beta1.MachineControlPlaneLabel: "true", }, }, }, @@ -2394,7 +2394,7 @@ func TestMachineScope_NICSpecs(t *testing.T) { AzureClients: AzureClients{ subscriptionID: "123", }, - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", Namespace: "default", @@ -2455,11 +2455,11 @@ func TestMachineScope_NICSpecs(t *testing.T) { }}, }, }, - Machine: &clusterv1.Machine{ + Machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine", Labels: map[string]string{ - clusterv1.MachineControlPlaneLabel: "true", + clusterv1beta1.MachineControlPlaneLabel: "true", }, }, }, @@ -2500,7 +2500,7 @@ func TestMachineScope_NICSpecs(t *testing.T) { AzureClients: AzureClients{ subscriptionID: "123", }, - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", Namespace: "default", @@ -2562,11 +2562,11 @@ func TestMachineScope_NICSpecs(t *testing.T) { DNSServers: []string{"123.123.123.123", "124.124.124.124"}, }, }, - Machine: &clusterv1.Machine{ + Machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine", Labels: map[string]string{ - clusterv1.MachineControlPlaneLabel: "true", + clusterv1beta1.MachineControlPlaneLabel: "true", }, }, }, @@ -2606,7 +2606,7 @@ func TestMachineScope_NICSpecs(t *testing.T) { AzureClients: AzureClients{ subscriptionID: "123", }, - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", Namespace: "default", @@ -2668,11 +2668,11 @@ func TestMachineScope_NICSpecs(t *testing.T) { DNSServers: []string{"123.123.123.123", "124.124.124.124"}, }, }, - Machine: &clusterv1.Machine{ + Machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine", Labels: map[string]string{ - clusterv1.MachineControlPlaneLabel: "true", + clusterv1beta1.MachineControlPlaneLabel: "true", }, }, }, @@ -2713,7 +2713,7 @@ func TestMachineScope_NICSpecs(t *testing.T) { AzureClients: AzureClients{ subscriptionID: "123", }, - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", Namespace: "default", @@ -2782,7 +2782,7 @@ func TestMachineScope_NICSpecs(t *testing.T) { }, }, }, - Machine: &clusterv1.Machine{ + Machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine", Labels: map[string]string{}, @@ -2849,7 +2849,7 @@ func TestMachineScope_NICSpecs(t *testing.T) { AzureClients: AzureClients{ subscriptionID: "123", }, - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", Namespace: "default", @@ -2916,7 +2916,7 @@ func 
TestMachineScope_NICSpecs(t *testing.T) { }, }, }, - Machine: &clusterv1.Machine{ + Machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine", Labels: map[string]string{}, @@ -2983,7 +2983,7 @@ func TestMachineScope_NICSpecs(t *testing.T) { AzureClients: AzureClients{ subscriptionID: "123", }, - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", Namespace: "default", @@ -3047,7 +3047,7 @@ func TestMachineScope_NICSpecs(t *testing.T) { }, }, }, - Machine: &clusterv1.Machine{ + Machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine", Labels: map[string]string{}, @@ -3108,7 +3108,7 @@ func TestDiskSpecs(t *testing.T) { name: "only os disk", machineScope: MachineScope{ ClusterScoper: &ClusterScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", }, @@ -3133,7 +3133,7 @@ func TestDiskSpecs(t *testing.T) { }, }, }, - Machine: &clusterv1.Machine{ + Machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine", }, @@ -3150,7 +3150,7 @@ func TestDiskSpecs(t *testing.T) { name: "os and data disks", machineScope: MachineScope{ ClusterScoper: &ClusterScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", }, @@ -3180,7 +3180,7 @@ func TestDiskSpecs(t *testing.T) { }, }, }, - Machine: &clusterv1.Machine{ + Machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine", }, @@ -3200,7 +3200,7 @@ func TestDiskSpecs(t *testing.T) { name: "os and multiple data disks", machineScope: MachineScope{ ClusterScoper: &ClusterScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", }, @@ -3233,7 +3233,7 @@ func TestDiskSpecs(t *testing.T) { }, }, }, - Machine: &clusterv1.Machine{ + Machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine", }, diff --git a/azure/scope/machinepool.go b/azure/scope/machinepool.go index 35283ef2a68..7bb63b82b52 100644 --- a/azure/scope/machinepool.go +++ b/azure/scope/machinepool.go @@ -32,13 +32,11 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" - "sigs.k8s.io/cluster-api/util" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/annotations" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" "sigs.k8s.io/cluster-api/util/labels/format" - "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -53,6 +51,7 @@ import ( azureutil "sigs.k8s.io/cluster-api-provider-azure/util/azure" "sigs.k8s.io/cluster-api-provider-azure/util/futures" "sigs.k8s.io/cluster-api-provider-azure/util/tele" + clusterv1beta1util "sigs.k8s.io/cluster-api-provider-azure/util/v1beta1" ) // ScalesetsServiceName is the name of the scalesets service. @@ -64,7 +63,7 @@ type ( // MachinePoolScopeParams defines the input parameters used to create a new MachinePoolScope. 
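Beyond the alias rename, machinepool.go carries the one real type move in this file set: the expv1 import is deleted outright and MachinePool is now read from the core v1beta1 package, reflecting MachinePool's move out of CAPI's exp API group. The scope still patches two objects, so it builds two of the deprecated v1beta1 patch helpers; a sketch with a hypothetical constructor name, condensing the two NewHelper calls from NewMachinePoolScope below:

```go
// Hypothetical helper, not CAPZ code. Note the pool's type: before
// this PR it was expv1.MachinePool from the exp API group.
func newPoolPatchHelpers(c client.Client, pool *clusterv1beta1.MachinePool, azPool *infrav1exp.AzureMachinePool) (*v1beta1patch.Helper, *v1beta1patch.Helper, error) {
	// One helper per object the scope mutates.
	azHelper, err := v1beta1patch.NewHelper(azPool, c)
	if err != nil {
		return nil, nil, errors.Wrap(err, "failed to init patch helper")
	}
	poolHelper, err := v1beta1patch.NewHelper(pool, c)
	if err != nil {
		return nil, nil, errors.Wrap(err, "failed to init capi patch helper")
	}
	return poolHelper, azHelper, nil
}
```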
MachinePoolScopeParams struct { Client client.Client - MachinePool *expv1.MachinePool + MachinePool *clusterv1beta1.MachinePool AzureMachinePool *infrav1exp.AzureMachinePool ClusterScope azure.ClusterScoper Cache *MachinePoolCache @@ -74,10 +73,10 @@ type ( MachinePoolScope struct { azure.ClusterScoper AzureMachinePool *infrav1exp.AzureMachinePool - MachinePool *expv1.MachinePool + MachinePool *clusterv1beta1.MachinePool client client.Client - patchHelper *patch.Helper - capiMachinePoolPatchHelper *patch.Helper + patchHelper *v1beta1patch.Helper + capiMachinePoolPatchHelper *v1beta1patch.Helper vmssState *azure.VMSS cache *MachinePoolCache skuCache *resourceskus.Cache @@ -114,12 +113,12 @@ func NewMachinePoolScope(params MachinePoolScopeParams) (*MachinePoolScope, erro return nil, errors.New("azure machine pool is required when creating a MachinePoolScope") } - helper, err := patch.NewHelper(params.AzureMachinePool, params.Client) + helper, err := v1beta1patch.NewHelper(params.AzureMachinePool, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init patch helper") } - capiMachinePoolPatchHelper, err := patch.NewHelper(params.MachinePool, params.Client) + capiMachinePoolPatchHelper, err := v1beta1patch.NewHelper(params.MachinePool, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init capi patch helper") } @@ -374,10 +373,10 @@ func (m *MachinePoolScope) updateReplicasAndProviderIDs(ctx context.Context) err func (m *MachinePoolScope) getMachinePoolMachineLabels() map[string]string { return map[string]string{ - clusterv1.ClusterNameLabel: m.ClusterName(), - infrav1exp.MachinePoolNameLabel: m.AzureMachinePool.Name, - clusterv1.MachinePoolNameLabel: format.MustFormatValue(m.MachinePool.Name), - m.ClusterName(): string(infrav1.ResourceLifecycleOwned), + clusterv1beta1.ClusterNameLabel: m.ClusterName(), + infrav1exp.MachinePoolNameLabel: m.AzureMachinePool.Name, + clusterv1beta1.MachinePoolNameLabel: format.MustFormatValue(m.MachinePool.Name), + m.ClusterName(): string(infrav1.ResourceLifecycleOwned), } } @@ -410,23 +409,23 @@ func (m *MachinePoolScope) applyAzureMachinePoolMachines(ctx context.Context) er existingMachinesByProviderID := make(map[string]infrav1exp.AzureMachinePoolMachine, len(ampms)) for _, ampm := range ampms { - machine, err := util.GetOwnerMachine(ctx, m.client, ampm.ObjectMeta) + machine, err := clusterv1beta1util.GetOwnerMachine(ctx, m.client, ampm.ObjectMeta) if err != nil { return fmt.Errorf("failed to find owner machine for %s/%s: %w", ampm.Namespace, ampm.Name, err) } - if _, ampmHasDeleteAnnotation := ampm.Annotations[clusterv1.DeleteMachineAnnotation]; !ampmHasDeleteAnnotation { + if _, ampmHasDeleteAnnotation := ampm.Annotations[clusterv1beta1.DeleteMachineAnnotation]; !ampmHasDeleteAnnotation { // fetch Machine delete annotation from owner machine to AzureMachinePoolMachine. // This ensures setting a deleteMachine annotation on the Machine has an effect on the AzureMachinePoolMachine // and the deployment strategy, in case the automatic propagation of the annotation from Machine to AzureMachinePoolMachine // hasn't been done yet. 
if machine != nil && machine.Annotations != nil { - if _, hasDeleteAnnotation := machine.Annotations[clusterv1.DeleteMachineAnnotation]; hasDeleteAnnotation { + if _, hasDeleteAnnotation := machine.Annotations[clusterv1beta1.DeleteMachineAnnotation]; hasDeleteAnnotation { log.V(4).Info("fetched DeleteMachineAnnotation", "machine", ampm.Spec.ProviderID) if ampm.Annotations == nil { ampm.Annotations = make(map[string]string) } - ampm.Annotations[clusterv1.DeleteMachineAnnotation] = machine.Annotations[clusterv1.DeleteMachineAnnotation] + ampm.Annotations[clusterv1beta1.DeleteMachineAnnotation] = machine.Annotations[clusterv1beta1.DeleteMachineAnnotation] } } } else { @@ -545,7 +544,7 @@ func (m *MachinePoolScope) createMachine(ctx context.Context, machine azure.VMSS ampm.Labels = labels controllerutil.AddFinalizer(&m, infrav1exp.AzureMachinePoolMachineFinalizer) - conditions.MarkFalse(&m, infrav1.VMRunningCondition, string(infrav1.Creating), clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(&m, infrav1.VMRunningCondition, string(infrav1.Creating), clusterv1beta1.ConditionSeverityInfo, "") if err := m.client.Create(ctx, &m); err != nil { return errors.Wrapf(err, "failed creating AzureMachinePoolMachine %s in AzureMachinePool %s", machine.ID, m.AzureMachinePool.Name) } @@ -558,7 +557,7 @@ func (m *MachinePoolScope) DeleteMachine(ctx context.Context, ampm infrav1exp.Az ctx, log, done := tele.StartSpanWithLogger(ctx, "scope.MachinePoolScope.DeleteMachine") defer done() - machine, err := util.GetOwnerMachine(ctx, m.client, ampm.ObjectMeta) + machine, err := clusterv1beta1util.GetOwnerMachine(ctx, m.client, ampm.ObjectMeta) if err != nil { return errors.Wrapf(err, "error getting owner Machine for AzureMachinePoolMachine %s/%s", ampm.Namespace, ampm.Name) } @@ -603,33 +602,33 @@ func (m *MachinePoolScope) setProvisioningStateAndConditions(v infrav1.Provision switch { case v == infrav1.Succeeded && *m.MachinePool.Spec.Replicas == m.AzureMachinePool.Status.Replicas: // vmss is provisioned with enough ready replicas - conditions.MarkTrue(m.AzureMachinePool, infrav1.ScaleSetRunningCondition) - conditions.MarkTrue(m.AzureMachinePool, infrav1.ScaleSetModelUpdatedCondition) - conditions.MarkTrue(m.AzureMachinePool, infrav1.ScaleSetDesiredReplicasCondition) + v1beta1conditions.MarkTrue(m.AzureMachinePool, infrav1.ScaleSetRunningCondition) + v1beta1conditions.MarkTrue(m.AzureMachinePool, infrav1.ScaleSetModelUpdatedCondition) + v1beta1conditions.MarkTrue(m.AzureMachinePool, infrav1.ScaleSetDesiredReplicasCondition) m.SetReady() case v == infrav1.Succeeded && *m.MachinePool.Spec.Replicas != m.AzureMachinePool.Status.Replicas: // not enough ready or too many ready replicas we must still be scaling up or down updatingState := infrav1.Updating m.AzureMachinePool.Status.ProvisioningState = &updatingState if *m.MachinePool.Spec.Replicas > m.AzureMachinePool.Status.Replicas { - conditions.MarkFalse(m.AzureMachinePool, infrav1.ScaleSetDesiredReplicasCondition, infrav1.ScaleSetScaleUpReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(m.AzureMachinePool, infrav1.ScaleSetDesiredReplicasCondition, infrav1.ScaleSetScaleUpReason, clusterv1beta1.ConditionSeverityInfo, "") } else { - conditions.MarkFalse(m.AzureMachinePool, infrav1.ScaleSetDesiredReplicasCondition, infrav1.ScaleSetScaleDownReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(m.AzureMachinePool, infrav1.ScaleSetDesiredReplicasCondition, infrav1.ScaleSetScaleDownReason, 
clusterv1beta1.ConditionSeverityInfo, "") } m.SetReady() case v == infrav1.Updating: - conditions.MarkFalse(m.AzureMachinePool, infrav1.ScaleSetModelUpdatedCondition, infrav1.ScaleSetModelOutOfDateReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(m.AzureMachinePool, infrav1.ScaleSetModelUpdatedCondition, infrav1.ScaleSetModelOutOfDateReason, clusterv1beta1.ConditionSeverityInfo, "") m.SetReady() case v == infrav1.Creating: - conditions.MarkFalse(m.AzureMachinePool, infrav1.ScaleSetRunningCondition, infrav1.ScaleSetCreatingReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(m.AzureMachinePool, infrav1.ScaleSetRunningCondition, infrav1.ScaleSetCreatingReason, clusterv1beta1.ConditionSeverityInfo, "") m.SetNotReady() case v == infrav1.Deleting: - conditions.MarkFalse(m.AzureMachinePool, infrav1.ScaleSetRunningCondition, infrav1.ScaleSetDeletingReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(m.AzureMachinePool, infrav1.ScaleSetRunningCondition, infrav1.ScaleSetDeletingReason, clusterv1beta1.ConditionSeverityInfo, "") m.SetNotReady() case v == infrav1.Failed: - conditions.MarkFalse(m.AzureMachinePool, infrav1.ScaleSetRunningCondition, infrav1.ScaleSetProvisionFailedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(m.AzureMachinePool, infrav1.ScaleSetRunningCondition, infrav1.ScaleSetProvisionFailedReason, clusterv1beta1.ConditionSeverityInfo, "") default: - conditions.MarkFalse(m.AzureMachinePool, infrav1.ScaleSetRunningCondition, string(v), clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(m.AzureMachinePool, infrav1.ScaleSetRunningCondition, string(v), clusterv1beta1.ConditionSeverityInfo, "") } } @@ -680,12 +679,12 @@ func (m *MachinePoolScope) PatchObject(ctx context.Context) error { ctx, _, done := tele.StartSpanWithLogger(ctx, "scope.MachinePoolScope.PatchObject") defer done() - conditions.SetSummary(m.AzureMachinePool) + v1beta1conditions.SetSummary(m.AzureMachinePool) return m.patchHelper.Patch( ctx, m.AzureMachinePool, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ - clusterv1.ReadyCondition, + v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ + clusterv1beta1.ReadyCondition, infrav1.BootstrapSucceededCondition, infrav1.ScaleSetDesiredReplicasCondition, infrav1.ScaleSetModelUpdatedCondition, @@ -908,38 +907,38 @@ func (m *MachinePoolScope) SetSubnetName() error { } // UpdateDeleteStatus updates a condition on the AzureMachinePool status after a DELETE operation. 
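// The condition-helper renames below follow the same mechanical pattern as the
// rest of this file; a minimal sketch, assuming the aliases declared in the
// import block (obj stands in for any conditions setter):
//
//	// before
//	conditions.MarkFalse(obj, cond, reason, clusterv1.ConditionSeverityInfo, "")
//	// after
//	v1beta1conditions.MarkFalse(obj, cond, reason, clusterv1beta1.ConditionSeverityInfo, "")
//
// Only the package aliases change in this diff; the call signatures are
// identical.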
-func (m *MachinePoolScope) UpdateDeleteStatus(condition clusterv1.ConditionType, service string, err error) { +func (m *MachinePoolScope) UpdateDeleteStatus(condition clusterv1beta1.ConditionType, service string, err error) { switch { case err == nil: - conditions.MarkFalse(m.AzureMachinePool, condition, infrav1.DeletedReason, clusterv1.ConditionSeverityInfo, "%s successfully deleted", service) + v1beta1conditions.MarkFalse(m.AzureMachinePool, condition, infrav1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "%s successfully deleted", service) case azure.IsOperationNotDoneError(err): - conditions.MarkFalse(m.AzureMachinePool, condition, infrav1.DeletingReason, clusterv1.ConditionSeverityInfo, "%s deleting", service) + v1beta1conditions.MarkFalse(m.AzureMachinePool, condition, infrav1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "%s deleting", service) default: - conditions.MarkFalse(m.AzureMachinePool, condition, infrav1.DeletionFailedReason, clusterv1.ConditionSeverityError, "%s failed to delete. err: %s", service, err.Error()) + v1beta1conditions.MarkFalse(m.AzureMachinePool, condition, infrav1.DeletionFailedReason, clusterv1beta1.ConditionSeverityError, "%s failed to delete. err: %s", service, err.Error()) } } // UpdatePutStatus updates a condition on the AzureMachinePool status after a PUT operation. -func (m *MachinePoolScope) UpdatePutStatus(condition clusterv1.ConditionType, service string, err error) { +func (m *MachinePoolScope) UpdatePutStatus(condition clusterv1beta1.ConditionType, service string, err error) { switch { case err == nil: - conditions.MarkTrue(m.AzureMachinePool, condition) + v1beta1conditions.MarkTrue(m.AzureMachinePool, condition) case azure.IsOperationNotDoneError(err): - conditions.MarkFalse(m.AzureMachinePool, condition, infrav1.CreatingReason, clusterv1.ConditionSeverityInfo, "%s creating or updating", service) + v1beta1conditions.MarkFalse(m.AzureMachinePool, condition, infrav1.CreatingReason, clusterv1beta1.ConditionSeverityInfo, "%s creating or updating", service) default: - conditions.MarkFalse(m.AzureMachinePool, condition, infrav1.FailedReason, clusterv1.ConditionSeverityError, "%s failed to create or update. err: %s", service, err.Error()) + v1beta1conditions.MarkFalse(m.AzureMachinePool, condition, infrav1.FailedReason, clusterv1beta1.ConditionSeverityError, "%s failed to create or update. err: %s", service, err.Error()) } } // UpdatePatchStatus updates a condition on the AzureMachinePool status after a PATCH operation. -func (m *MachinePoolScope) UpdatePatchStatus(condition clusterv1.ConditionType, service string, err error) { +func (m *MachinePoolScope) UpdatePatchStatus(condition clusterv1beta1.ConditionType, service string, err error) { switch { case err == nil: - conditions.MarkTrue(m.AzureMachinePool, condition) + v1beta1conditions.MarkTrue(m.AzureMachinePool, condition) case azure.IsOperationNotDoneError(err): - conditions.MarkFalse(m.AzureMachinePool, condition, infrav1.UpdatingReason, clusterv1.ConditionSeverityInfo, "%s updating", service) + v1beta1conditions.MarkFalse(m.AzureMachinePool, condition, infrav1.UpdatingReason, clusterv1beta1.ConditionSeverityInfo, "%s updating", service) default: - conditions.MarkFalse(m.AzureMachinePool, condition, infrav1.FailedReason, clusterv1.ConditionSeverityError, "%s failed to update. err: %s", service, err.Error()) + v1beta1conditions.MarkFalse(m.AzureMachinePool, condition, infrav1.FailedReason, clusterv1beta1.ConditionSeverityError, "%s failed to update. 
err: %s", service, err.Error()) } } diff --git a/azure/scope/machinepool_test.go b/azure/scope/machinepool_test.go index 9a81bcdf68d..4ecd54234b7 100644 --- a/azure/scope/machinepool_test.go +++ b/azure/scope/machinepool_test.go @@ -36,9 +36,8 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -267,7 +266,7 @@ func TestMachinePoolScope_NetworkInterfaces(t *testing.T) { func TestMachinePoolScope_MaxSurge(t *testing.T) { cases := []struct { Name string - Setup func(mp *expv1.MachinePool, amp *infrav1exp.AzureMachinePool) + Setup func(mp *clusterv1beta1.MachinePool, amp *infrav1exp.AzureMachinePool) Verify func(g *WithT, surge int, err error) }{ { @@ -279,7 +278,7 @@ func TestMachinePoolScope_MaxSurge(t *testing.T) { }, { Name: "default surge should be 1 regardless of replica count with no surger", - Setup: func(mp *expv1.MachinePool, amp *infrav1exp.AzureMachinePool) { + Setup: func(mp *clusterv1beta1.MachinePool, amp *infrav1exp.AzureMachinePool) { mp.Spec.Replicas = ptr.To[int32](3) }, Verify: func(g *WithT, surge int, err error) { @@ -289,7 +288,7 @@ func TestMachinePoolScope_MaxSurge(t *testing.T) { }, { Name: "default surge should be 2 as specified by the surger", - Setup: func(mp *expv1.MachinePool, amp *infrav1exp.AzureMachinePool) { + Setup: func(mp *clusterv1beta1.MachinePool, amp *infrav1exp.AzureMachinePool) { mp.Spec.Replicas = ptr.To[int32](3) two := intstr.FromInt(2) amp.Spec.Strategy = infrav1exp.AzureMachinePoolDeploymentStrategy{ @@ -306,7 +305,7 @@ func TestMachinePoolScope_MaxSurge(t *testing.T) { }, { Name: "default surge should be 2 (50%) of the desired replicas", - Setup: func(mp *expv1.MachinePool, amp *infrav1exp.AzureMachinePool) { + Setup: func(mp *clusterv1beta1.MachinePool, amp *infrav1exp.AzureMachinePool) { mp.Spec.Replicas = ptr.To[int32](4) fiftyPercent := intstr.FromString("50%") amp.Spec.Strategy = infrav1exp.AzureMachinePoolDeploymentStrategy{ @@ -336,12 +335,12 @@ func TestMachinePoolScope_MaxSurge(t *testing.T) { { Name: "mp1", Kind: "MachinePool", - APIVersion: expv1.GroupVersion.String(), + APIVersion: clusterv1beta1.GroupVersion.String(), }, }, }, } - mp = &expv1.MachinePool{ + mp = &clusterv1beta1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "mp1", Namespace: "default", @@ -376,7 +375,7 @@ func TestMachinePoolScope_SaveVMImageToStatus(t *testing.T) { { Name: "mp1", Kind: "MachinePool", - APIVersion: expv1.GroupVersion.String(), + APIVersion: clusterv1beta1.GroupVersion.String(), }, }, }, @@ -413,12 +412,12 @@ func TestMachinePoolScope_GetVMImage(t *testing.T) { clusterMock.EXPECT().Token().Return(&azidentity.DefaultAzureCredential{}).AnyTimes() cases := []struct { Name string - Setup func(mp *expv1.MachinePool, amp *infrav1exp.AzureMachinePool) + Setup func(mp *clusterv1beta1.MachinePool, amp *infrav1exp.AzureMachinePool) Verify func(g *WithT, amp *infrav1exp.AzureMachinePool, vmImage *infrav1.Image, err error) }{ { Name: "should set and default the image if no image is specified for the AzureMachinePool", - Setup: func(mp *expv1.MachinePool, amp *infrav1exp.AzureMachinePool) { + Setup: func(mp 
*clusterv1beta1.MachinePool, amp *infrav1exp.AzureMachinePool) { mp.Spec.Template.Spec.Version = ptr.To("v1.19.11") }, Verify: func(g *WithT, amp *infrav1exp.AzureMachinePool, vmImage *infrav1.Image, err error) { @@ -436,7 +435,7 @@ func TestMachinePoolScope_GetVMImage(t *testing.T) { }, { Name: "should not default or set the image on the AzureMachinePool if it already exists", - Setup: func(mp *expv1.MachinePool, amp *infrav1exp.AzureMachinePool) { + Setup: func(mp *clusterv1beta1.MachinePool, amp *infrav1exp.AzureMachinePool) { mp.Spec.Template.Spec.Version = ptr.To("v1.19.11") amp.Spec.Template.Image = &infrav1.Image{ Marketplace: &infrav1.AzureMarketplaceImage{ @@ -482,12 +481,12 @@ func TestMachinePoolScope_GetVMImage(t *testing.T) { { Name: "mp1", Kind: "MachinePool", - APIVersion: expv1.GroupVersion.String(), + APIVersion: clusterv1beta1.GroupVersion.String(), }, }, }, } - mp = &expv1.MachinePool{ + mp = &clusterv1beta1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "mp1", Namespace: "default", @@ -514,12 +513,12 @@ func TestMachinePoolScope_GetVMImage(t *testing.T) { func TestMachinePoolScope_NeedsRequeue(t *testing.T) { cases := []struct { Name string - Setup func(mp *expv1.MachinePool, amp *infrav1exp.AzureMachinePool, vmss *azure.VMSS) + Setup func(mp *clusterv1beta1.MachinePool, amp *infrav1exp.AzureMachinePool, vmss *azure.VMSS) Verify func(g *WithT, requeue bool) }{ { Name: "should requeue if the machine is not in succeeded state", - Setup: func(mp *expv1.MachinePool, amp *infrav1exp.AzureMachinePool, vmss *azure.VMSS) { + Setup: func(mp *clusterv1beta1.MachinePool, amp *infrav1exp.AzureMachinePool, vmss *azure.VMSS) { creating := infrav1.Creating mp.Spec.Replicas = ptr.To[int32](0) amp.Status.ProvisioningState = &creating @@ -530,7 +529,7 @@ func TestMachinePoolScope_NeedsRequeue(t *testing.T) { }, { Name: "should not requeue if the machine is in succeeded state", - Setup: func(mp *expv1.MachinePool, amp *infrav1exp.AzureMachinePool, vmss *azure.VMSS) { + Setup: func(mp *clusterv1beta1.MachinePool, amp *infrav1exp.AzureMachinePool, vmss *azure.VMSS) { succeeded := infrav1.Succeeded mp.Spec.Replicas = ptr.To[int32](0) amp.Status.ProvisioningState = &succeeded @@ -541,7 +540,7 @@ func TestMachinePoolScope_NeedsRequeue(t *testing.T) { }, { Name: "should requeue if the machine is in succeeded state but desired replica count does not match", - Setup: func(mp *expv1.MachinePool, amp *infrav1exp.AzureMachinePool, vmss *azure.VMSS) { + Setup: func(mp *clusterv1beta1.MachinePool, amp *infrav1exp.AzureMachinePool, vmss *azure.VMSS) { succeeded := infrav1.Succeeded mp.Spec.Replicas = ptr.To[int32](1) amp.Status.ProvisioningState = &succeeded @@ -552,7 +551,7 @@ func TestMachinePoolScope_NeedsRequeue(t *testing.T) { }, { Name: "should not requeue if the machine is in succeeded state but desired replica count does match", - Setup: func(mp *expv1.MachinePool, amp *infrav1exp.AzureMachinePool, vmss *azure.VMSS) { + Setup: func(mp *clusterv1beta1.MachinePool, amp *infrav1exp.AzureMachinePool, vmss *azure.VMSS) { succeeded := infrav1.Succeeded mp.Spec.Replicas = ptr.To[int32](1) amp.Status.ProvisioningState = &succeeded @@ -568,7 +567,7 @@ func TestMachinePoolScope_NeedsRequeue(t *testing.T) { }, { Name: "should requeue if an instance VM image does not match the VM image of the VMSS", - Setup: func(mp *expv1.MachinePool, amp *infrav1exp.AzureMachinePool, vmss *azure.VMSS) { + Setup: func(mp *clusterv1beta1.MachinePool, amp *infrav1exp.AzureMachinePool, vmss *azure.VMSS) { succeeded := 
infrav1.Succeeded mp.Spec.Replicas = ptr.To[int32](1) amp.Status.ProvisioningState = &succeeded @@ -602,12 +601,12 @@ func TestMachinePoolScope_NeedsRequeue(t *testing.T) { { Name: "mp1", Kind: "MachinePool", - APIVersion: expv1.GroupVersion.String(), + APIVersion: clusterv1beta1.GroupVersion.String(), }, }, }, } - mp = &expv1.MachinePool{ + mp = &clusterv1beta1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "mp1", Namespace: "default", @@ -633,9 +632,8 @@ func TestMachinePoolScope_NeedsRequeue(t *testing.T) { func TestMachinePoolScope_updateReplicasAndProviderIDs(t *testing.T) { scheme := runtime.NewScheme() - _ = clusterv1.AddToScheme(scheme) + _ = clusterv1beta1.AddToScheme(scheme) _ = infrav1exp.AddToScheme(scheme) - _ = expv1.AddToScheme(scheme) cases := []struct { Name string @@ -675,7 +673,7 @@ func TestMachinePoolScope_updateReplicasAndProviderIDs(t *testing.T) { Name: "should only count machines with matching cluster name label", Setup: func(cb *fake.ClientBuilder) { machines := getReadyAzureMachinePoolMachines(3) - machines[0].Labels[clusterv1.ClusterNameLabel] = "not_correct" + machines[0].Labels[clusterv1beta1.ClusterNameLabel] = "not_correct" for _, machine := range machines { obj := machine cb.WithObjects(&obj) @@ -694,21 +692,21 @@ func TestMachinePoolScope_updateReplicasAndProviderIDs(t *testing.T) { g = NewWithT(t) mockCtrl = gomock.NewController(t) cb = fake.NewClientBuilder().WithScheme(scheme) - cluster = &clusterv1.Cluster{ + cluster = &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: "default", }, - Spec: clusterv1.ClusterSpec{ + Spec: clusterv1beta1.ClusterSpec{ InfrastructureRef: &corev1.ObjectReference{ Name: "azCluster1", }, }, - Status: clusterv1.ClusterStatus{ + Status: clusterv1beta1.ClusterStatus{ InfrastructureReady: true, }, } - mp = &expv1.MachinePool{ + mp = &clusterv1beta1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "mp1", Namespace: "default", @@ -722,7 +720,7 @@ func TestMachinePoolScope_updateReplicasAndProviderIDs(t *testing.T) { { Name: "mp1", Kind: "MachinePool", - APIVersion: expv1.GroupVersion.String(), + APIVersion: clusterv1beta1.GroupVersion.String(), }, }, }, @@ -765,7 +763,7 @@ func TestMachinePoolScope_RoleAssignmentSpecs(t *testing.T) { { name: "returns role assignment spec if VM identity is system assigned", machinePoolScope: MachinePoolScope{ - MachinePool: &expv1.MachinePool{}, + MachinePool: &clusterv1beta1.MachinePool{}, AzureMachinePool: &infrav1exp.AzureMachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", @@ -805,7 +803,7 @@ func TestMachinePoolScope_RoleAssignmentSpecs(t *testing.T) { { name: "returns role assignment spec if scope and role definition ID are set", machinePoolScope: MachinePoolScope{ - MachinePool: &expv1.MachinePool{}, + MachinePool: &clusterv1beta1.MachinePool{}, AzureMachinePool: &infrav1exp.AzureMachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", @@ -865,7 +863,7 @@ func TestMachinePoolScope_VMSSExtensionSpecs(t *testing.T) { { name: "If OS type is Linux and cloud is AzurePublicCloud, it returns ExtensionSpec", machinePoolScope: MachinePoolScope{ - MachinePool: &expv1.MachinePool{}, + MachinePool: &clusterv1beta1.MachinePool{}, AzureMachinePool: &infrav1exp.AzureMachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "machinepool-name", @@ -910,7 +908,7 @@ func TestMachinePoolScope_VMSSExtensionSpecs(t *testing.T) { { name: "If OS type is Linux and cloud is not AzurePublicCloud, it returns empty", machinePoolScope: MachinePoolScope{ - MachinePool: 
&expv1.MachinePool{}, + MachinePool: &clusterv1beta1.MachinePool{}, AzureMachinePool: &infrav1exp.AzureMachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "machinepool-name", @@ -942,7 +940,7 @@ func TestMachinePoolScope_VMSSExtensionSpecs(t *testing.T) { { name: "If OS type is Windows and cloud is AzurePublicCloud, it returns ExtensionSpec", machinePoolScope: MachinePoolScope{ - MachinePool: &expv1.MachinePool{}, + MachinePool: &clusterv1beta1.MachinePool{}, AzureMachinePool: &infrav1exp.AzureMachinePool{ ObjectMeta: metav1.ObjectMeta{ // Note: machine pool names longer than 9 characters get truncated. See MachinePoolScope::Name() for more details. @@ -989,7 +987,7 @@ func TestMachinePoolScope_VMSSExtensionSpecs(t *testing.T) { { name: "If OS type is Windows and cloud is not AzurePublicCloud, it returns empty", machinePoolScope: MachinePoolScope{ - MachinePool: &expv1.MachinePool{}, + MachinePool: &clusterv1beta1.MachinePool{}, AzureMachinePool: &infrav1exp.AzureMachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "machinepool-name", @@ -1021,7 +1019,7 @@ func TestMachinePoolScope_VMSSExtensionSpecs(t *testing.T) { { name: "If OS type is not Linux or Windows and cloud is AzurePublicCloud, it returns empty", machinePoolScope: MachinePoolScope{ - MachinePool: &expv1.MachinePool{}, + MachinePool: &clusterv1beta1.MachinePool{}, AzureMachinePool: &infrav1exp.AzureMachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "machinepool-name", @@ -1053,7 +1051,7 @@ func TestMachinePoolScope_VMSSExtensionSpecs(t *testing.T) { { name: "If OS type is not Windows or Linux and cloud is not AzurePublicCloud, it returns empty", machinePoolScope: MachinePoolScope{ - MachinePool: &expv1.MachinePool{}, + MachinePool: &clusterv1beta1.MachinePool{}, AzureMachinePool: &infrav1exp.AzureMachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "machinepool-name", @@ -1085,7 +1083,7 @@ func TestMachinePoolScope_VMSSExtensionSpecs(t *testing.T) { { name: "If a custom VM extension is specified, it returns the custom VM extension", machinePoolScope: MachinePoolScope{ - MachinePool: &expv1.MachinePool{}, + MachinePool: &clusterv1beta1.MachinePool{}, AzureMachinePool: &infrav1exp.AzureMachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "machinepool-name", @@ -1184,10 +1182,10 @@ func getReadyAzureMachinePoolMachines(count int32) []infrav1exp.AzureMachinePool }, }, Labels: map[string]string{ - clusterv1.ClusterNameLabel: "cluster1", - infrav1exp.MachinePoolNameLabel: "amp1", - clusterv1.MachinePoolNameLabel: "mp1", - "cluster1": string(infrav1.ResourceLifecycleOwned), + clusterv1beta1.ClusterNameLabel: "cluster1", + infrav1exp.MachinePoolNameLabel: "amp1", + clusterv1beta1.MachinePoolNameLabel: "mp1", + "cluster1": string(infrav1.ResourceLifecycleOwned), }, }, Spec: infrav1exp.AzureMachinePoolMachineSpec{ @@ -1216,10 +1214,10 @@ func getAzureMachinePoolMachine(index int) infrav1exp.AzureMachinePoolMachine { }, }, Labels: map[string]string{ - clusterv1.ClusterNameLabel: "cluster1", - infrav1exp.MachinePoolNameLabel: "amp1", - clusterv1.MachinePoolNameLabel: "mp1", - "cluster1": string(infrav1.ResourceLifecycleOwned), + clusterv1beta1.ClusterNameLabel: "cluster1", + infrav1exp.MachinePoolNameLabel: "amp1", + clusterv1beta1.MachinePoolNameLabel: "mp1", + "cluster1": string(infrav1.ResourceLifecycleOwned), }, }, Spec: infrav1exp.AzureMachinePoolMachineSpec{ @@ -1232,9 +1230,9 @@ func getAzureMachinePoolMachine(index int) infrav1exp.AzureMachinePoolMachine { } } -func getAzureMachinePoolMachineWithOwnerMachine(index int) (clusterv1.Machine, 
infrav1exp.AzureMachinePoolMachine) { +func getAzureMachinePoolMachineWithOwnerMachine(index int) (clusterv1beta1.Machine, infrav1exp.AzureMachinePoolMachine) { ampm := getAzureMachinePoolMachine(index) - machine := clusterv1.Machine{ + machine := clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("mpm%d", index), Namespace: "default", @@ -1242,15 +1240,15 @@ func getAzureMachinePoolMachineWithOwnerMachine(index int) (clusterv1.Machine, i { Name: "mp", Kind: "MachinePool", - APIVersion: expv1.GroupVersion.String(), + APIVersion: clusterv1beta1.GroupVersion.String(), }, }, Labels: map[string]string{ - clusterv1.ClusterNameLabel: "cluster1", - clusterv1.MachinePoolNameLabel: "mp1", + clusterv1beta1.ClusterNameLabel: "cluster1", + clusterv1beta1.MachinePoolNameLabel: "mp1", }, }, - Spec: clusterv1.MachineSpec{ + Spec: clusterv1beta1.MachineSpec{ ProviderID: &m.Spec.ProviderID, InfrastructureRef: corev1.ObjectReference{ Kind: "AzureMachinePoolMachine", @@ -1263,7 +1261,7 @@ func getAzureMachinePoolMachineWithOwnerMachine(index int) (clusterv1.Machine, i ampm.OwnerReferences = append(ampm.OwnerReferences, metav1.OwnerReference{ Name: machine.Name, Kind: "Machine", - APIVersion: clusterv1.GroupVersion.String(), + APIVersion: clusterv1beta1.GroupVersion.String(), }) return machine, ampm @@ -1312,18 +1310,18 @@ func TestMachinePoolScope_applyAzureMachinePoolMachines(t *testing.T) { ctx, cancel := context.WithCancel(t.Context()) defer cancel() scheme := runtime.NewScheme() - _ = clusterv1.AddToScheme(scheme) + _ = clusterv1beta1.AddToScheme(scheme) _ = infrav1exp.AddToScheme(scheme) tests := []struct { Name string - Setup func(mp *expv1.MachinePool, amp *infrav1exp.AzureMachinePool, vmssState *azure.VMSS, cb *fake.ClientBuilder) + Setup func(mp *clusterv1beta1.MachinePool, amp *infrav1exp.AzureMachinePool, vmssState *azure.VMSS, cb *fake.ClientBuilder) Verify func(g *WithT, amp *infrav1exp.AzureMachinePool, c client.Client, err error) }{ { Name: "if MachinePool is externally managed and overProvisionCount > 0, do not try to reduce replicas", - Setup: func(mp *expv1.MachinePool, amp *infrav1exp.AzureMachinePool, vmssState *azure.VMSS, cb *fake.ClientBuilder) { - mp.Annotations = map[string]string{clusterv1.ReplicasManagedByAnnotation: "cluster-autoscaler"} + Setup: func(mp *clusterv1beta1.MachinePool, amp *infrav1exp.AzureMachinePool, vmssState *azure.VMSS, cb *fake.ClientBuilder) { + mp.Annotations = map[string]string{clusterv1beta1.ReplicasManagedByAnnotation: "cluster-autoscaler"} mp.Spec.Replicas = ptr.To[int32](1) mpm1, ampm1 := getAzureMachinePoolMachineWithOwnerMachine(1) @@ -1344,14 +1342,14 @@ func TestMachinePoolScope_applyAzureMachinePoolMachines(t *testing.T) { }, Verify: func(g *WithT, amp *infrav1exp.AzureMachinePool, c client.Client, err error) { g.Expect(err).NotTo(HaveOccurred()) - list := clusterv1.MachineList{} + list := clusterv1beta1.MachineList{} g.Expect(c.List(ctx, &list)).NotTo(HaveOccurred()) g.Expect(list.Items).Should(HaveLen(2)) }, }, { Name: "if MachinePool is not externally managed and overProvisionCount > 0, reduce replicas", - Setup: func(mp *expv1.MachinePool, amp *infrav1exp.AzureMachinePool, vmssState *azure.VMSS, cb *fake.ClientBuilder) { + Setup: func(mp *clusterv1beta1.MachinePool, amp *infrav1exp.AzureMachinePool, vmssState *azure.VMSS, cb *fake.ClientBuilder) { mp.Spec.Replicas = ptr.To[int32](1) mpm1, ampm1 := getAzureMachinePoolMachineWithOwnerMachine(1) @@ -1373,21 +1371,21 @@ func TestMachinePoolScope_applyAzureMachinePoolMachines(t 
*testing.T) { }, Verify: func(g *WithT, amp *infrav1exp.AzureMachinePool, c client.Client, err error) { g.Expect(err).NotTo(HaveOccurred()) - list := clusterv1.MachineList{} + list := clusterv1beta1.MachineList{} g.Expect(c.List(ctx, &list)).NotTo(HaveOccurred()) g.Expect(list.Items).Should(HaveLen(1)) }, }, { Name: "if MachinePool is not externally managed, and Machines have delete machine annotation, and overProvisionCount > 0, delete machines with deleteMachine annotation first", - Setup: func(mp *expv1.MachinePool, amp *infrav1exp.AzureMachinePool, vmssState *azure.VMSS, cb *fake.ClientBuilder) { + Setup: func(mp *clusterv1beta1.MachinePool, amp *infrav1exp.AzureMachinePool, vmssState *azure.VMSS, cb *fake.ClientBuilder) { mp.Spec.Replicas = ptr.To[int32](2) mpm1, ampm1 := getAzureMachinePoolMachineWithOwnerMachine(1) mpm2, ampm2 := getAzureMachinePoolMachineWithOwnerMachine(2) mpm2.Annotations = map[string]string{ - clusterv1.DeleteMachineAnnotation: time.Now().String(), + clusterv1beta1.DeleteMachineAnnotation: time.Now().String(), } mpm3, ampm3 := getAzureMachinePoolMachineWithOwnerMachine(3) @@ -1411,7 +1409,7 @@ func TestMachinePoolScope_applyAzureMachinePoolMachines(t *testing.T) { }, Verify: func(g *WithT, amp *infrav1exp.AzureMachinePool, c client.Client, err error) { g.Expect(err).NotTo(HaveOccurred()) - list := clusterv1.MachineList{} + list := clusterv1beta1.MachineList{} g.Expect(c.List(ctx, &list)).NotTo(HaveOccurred()) g.Expect(list.Items).Should(HaveLen(2)) g.Expect(list.Items[0].Name).Should(Equal("mpm1")) @@ -1420,7 +1418,7 @@ func TestMachinePoolScope_applyAzureMachinePoolMachines(t *testing.T) { }, { Name: "if existing MachinePool is not present, reduce replicas", - Setup: func(mp *expv1.MachinePool, amp *infrav1exp.AzureMachinePool, vmssState *azure.VMSS, cb *fake.ClientBuilder) { + Setup: func(mp *clusterv1beta1.MachinePool, amp *infrav1exp.AzureMachinePool, vmssState *azure.VMSS, cb *fake.ClientBuilder) { mp.Spec.Replicas = ptr.To[int32](1) vmssState.Instances = []azure.VMSSVM{ @@ -1439,7 +1437,7 @@ func TestMachinePoolScope_applyAzureMachinePoolMachines(t *testing.T) { }, { Name: "if existing MachinePool is not present and Instances ID is in wrong format, reduce replicas", - Setup: func(mp *expv1.MachinePool, amp *infrav1exp.AzureMachinePool, vmssState *azure.VMSS, cb *fake.ClientBuilder) { + Setup: func(mp *clusterv1beta1.MachinePool, amp *infrav1exp.AzureMachinePool, vmssState *azure.VMSS, cb *fake.ClientBuilder) { mp.Spec.Replicas = ptr.To[int32](1) vmssState.Instances = []azure.VMSSVM{ @@ -1455,7 +1453,7 @@ func TestMachinePoolScope_applyAzureMachinePoolMachines(t *testing.T) { }, { Name: "if existing MachinePool is present but in deleting state, do not recreate AzureMachinePoolMachines", - Setup: func(mp *expv1.MachinePool, amp *infrav1exp.AzureMachinePool, vmssState *azure.VMSS, cb *fake.ClientBuilder) { + Setup: func(mp *clusterv1beta1.MachinePool, amp *infrav1exp.AzureMachinePool, vmssState *azure.VMSS, cb *fake.ClientBuilder) { mp.Spec.Replicas = ptr.To[int32](1) vmssState.Instances = []azure.VMSSVM{ @@ -1480,21 +1478,21 @@ func TestMachinePoolScope_applyAzureMachinePoolMachines(t *testing.T) { g = NewWithT(t) mockCtrl = gomock.NewController(t) cb = fake.NewClientBuilder().WithScheme(scheme) - cluster = &clusterv1.Cluster{ + cluster = &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: "default", }, - Spec: clusterv1.ClusterSpec{ + Spec: clusterv1beta1.ClusterSpec{ InfrastructureRef: &corev1.ObjectReference{ Name: 
"azCluster1", }, }, - Status: clusterv1.ClusterStatus{ + Status: clusterv1beta1.ClusterStatus{ InfrastructureReady: true, }, } - mp = &expv1.MachinePool{ + mp = &clusterv1beta1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "mp1", Namespace: "default", @@ -1502,7 +1500,7 @@ func TestMachinePoolScope_applyAzureMachinePoolMachines(t *testing.T) { { Name: "cluster1", Kind: "Cluster", - APIVersion: clusterv1.GroupVersion.String(), + APIVersion: clusterv1beta1.GroupVersion.String(), }, }, }, @@ -1515,7 +1513,7 @@ func TestMachinePoolScope_applyAzureMachinePoolMachines(t *testing.T) { { Name: "mp1", Kind: "MachinePool", - APIVersion: expv1.GroupVersion.String(), + APIVersion: clusterv1beta1.GroupVersion.String(), }, }, }, @@ -1542,38 +1540,38 @@ func TestMachinePoolScope_applyAzureMachinePoolMachines(t *testing.T) { func TestMachinePoolScope_setProvisioningStateAndConditions(t *testing.T) { scheme := runtime.NewScheme() - _ = clusterv1.AddToScheme(scheme) + _ = clusterv1beta1.AddToScheme(scheme) _ = infrav1exp.AddToScheme(scheme) tests := []struct { Name string - Setup func(mp *expv1.MachinePool, amp *infrav1exp.AzureMachinePool, cb *fake.ClientBuilder) + Setup func(mp *clusterv1beta1.MachinePool, amp *infrav1exp.AzureMachinePool, cb *fake.ClientBuilder) Verify func(g *WithT, amp *infrav1exp.AzureMachinePool, c client.Client) ProvisioningState infrav1.ProvisioningState }{ { Name: "if provisioning state is set to Succeeded and replicas match, MachinePool is ready and conditions match", - Setup: func(mp *expv1.MachinePool, amp *infrav1exp.AzureMachinePool, cb *fake.ClientBuilder) { + Setup: func(mp *clusterv1beta1.MachinePool, amp *infrav1exp.AzureMachinePool, cb *fake.ClientBuilder) { mp.Spec.Replicas = ptr.To[int32](1) amp.Status.Replicas = 1 }, Verify: func(g *WithT, amp *infrav1exp.AzureMachinePool, c client.Client) { g.Expect(amp.Status.Ready).To(BeTrue()) - g.Expect(conditions.Get(amp, infrav1.ScaleSetRunningCondition).Status).To(Equal(corev1.ConditionTrue)) - g.Expect(conditions.Get(amp, infrav1.ScaleSetModelUpdatedCondition).Status).To(Equal(corev1.ConditionTrue)) - g.Expect(conditions.Get(amp, infrav1.ScaleSetDesiredReplicasCondition).Status).To(Equal(corev1.ConditionTrue)) + g.Expect(v1beta1conditions.Get(amp, infrav1.ScaleSetRunningCondition).Status).To(Equal(corev1.ConditionTrue)) + g.Expect(v1beta1conditions.Get(amp, infrav1.ScaleSetModelUpdatedCondition).Status).To(Equal(corev1.ConditionTrue)) + g.Expect(v1beta1conditions.Get(amp, infrav1.ScaleSetDesiredReplicasCondition).Status).To(Equal(corev1.ConditionTrue)) }, ProvisioningState: infrav1.Succeeded, }, { Name: "if provisioning state is set to Succeeded and replicas are higher on AzureMachinePool, MachinePool is ready and ScalingDown", - Setup: func(mp *expv1.MachinePool, amp *infrav1exp.AzureMachinePool, cb *fake.ClientBuilder) { + Setup: func(mp *clusterv1beta1.MachinePool, amp *infrav1exp.AzureMachinePool, cb *fake.ClientBuilder) { mp.Spec.Replicas = ptr.To[int32](1) amp.Status.Replicas = 2 }, Verify: func(g *WithT, amp *infrav1exp.AzureMachinePool, c client.Client) { g.Expect(amp.Status.Ready).To(BeTrue()) - condition := conditions.Get(amp, infrav1.ScaleSetDesiredReplicasCondition) + condition := v1beta1conditions.Get(amp, infrav1.ScaleSetDesiredReplicasCondition) g.Expect(condition.Status).To(Equal(corev1.ConditionFalse)) g.Expect(condition.Reason).To(Equal(infrav1.ScaleSetScaleDownReason)) }, @@ -1581,13 +1579,13 @@ func TestMachinePoolScope_setProvisioningStateAndConditions(t *testing.T) { }, { Name: "if provisioning state 
is set to Succeeded and replicas are lower on AzureMachinePool, MachinePool is ready and ScalingUp", - Setup: func(mp *expv1.MachinePool, amp *infrav1exp.AzureMachinePool, cb *fake.ClientBuilder) { + Setup: func(mp *clusterv1beta1.MachinePool, amp *infrav1exp.AzureMachinePool, cb *fake.ClientBuilder) { mp.Spec.Replicas = ptr.To[int32](2) amp.Status.Replicas = 1 }, Verify: func(g *WithT, amp *infrav1exp.AzureMachinePool, c client.Client) { g.Expect(amp.Status.Ready).To(BeTrue()) - condition := conditions.Get(amp, infrav1.ScaleSetDesiredReplicasCondition) + condition := v1beta1conditions.Get(amp, infrav1.ScaleSetDesiredReplicasCondition) g.Expect(condition.Status).To(Equal(corev1.ConditionFalse)) g.Expect(condition.Reason).To(Equal(infrav1.ScaleSetScaleUpReason)) }, @@ -1595,10 +1593,10 @@ func TestMachinePoolScope_setProvisioningStateAndConditions(t *testing.T) { }, { Name: "if provisioning state is set to Updating, MachinePool is ready and scale set model is set to OutOfDate", - Setup: func(mp *expv1.MachinePool, amp *infrav1exp.AzureMachinePool, cb *fake.ClientBuilder) {}, + Setup: func(mp *clusterv1beta1.MachinePool, amp *infrav1exp.AzureMachinePool, cb *fake.ClientBuilder) {}, Verify: func(g *WithT, amp *infrav1exp.AzureMachinePool, c client.Client) { g.Expect(amp.Status.Ready).To(BeTrue()) - condition := conditions.Get(amp, infrav1.ScaleSetModelUpdatedCondition) + condition := v1beta1conditions.Get(amp, infrav1.ScaleSetModelUpdatedCondition) g.Expect(condition.Status).To(Equal(corev1.ConditionFalse)) g.Expect(condition.Reason).To(Equal(infrav1.ScaleSetModelOutOfDateReason)) }, @@ -1606,10 +1604,10 @@ func TestMachinePoolScope_setProvisioningStateAndConditions(t *testing.T) { }, { Name: "if provisioning state is set to Creating, MachinePool is NotReady and scale set running condition is set to Creating", - Setup: func(mp *expv1.MachinePool, amp *infrav1exp.AzureMachinePool, cb *fake.ClientBuilder) {}, + Setup: func(mp *clusterv1beta1.MachinePool, amp *infrav1exp.AzureMachinePool, cb *fake.ClientBuilder) {}, Verify: func(g *WithT, amp *infrav1exp.AzureMachinePool, c client.Client) { g.Expect(amp.Status.Ready).To(BeFalse()) - condition := conditions.Get(amp, infrav1.ScaleSetRunningCondition) + condition := v1beta1conditions.Get(amp, infrav1.ScaleSetRunningCondition) g.Expect(condition.Status).To(Equal(corev1.ConditionFalse)) g.Expect(condition.Reason).To(Equal(infrav1.ScaleSetCreatingReason)) }, @@ -1617,10 +1615,10 @@ func TestMachinePoolScope_setProvisioningStateAndConditions(t *testing.T) { }, { Name: "if provisioning state is set to Deleting, MachinePool is NotReady and scale set running condition is set to Deleting", - Setup: func(mp *expv1.MachinePool, amp *infrav1exp.AzureMachinePool, cb *fake.ClientBuilder) {}, + Setup: func(mp *clusterv1beta1.MachinePool, amp *infrav1exp.AzureMachinePool, cb *fake.ClientBuilder) {}, Verify: func(g *WithT, amp *infrav1exp.AzureMachinePool, c client.Client) { g.Expect(amp.Status.Ready).To(BeFalse()) - condition := conditions.Get(amp, infrav1.ScaleSetRunningCondition) + condition := v1beta1conditions.Get(amp, infrav1.ScaleSetRunningCondition) g.Expect(condition.Status).To(Equal(corev1.ConditionFalse)) g.Expect(condition.Reason).To(Equal(infrav1.ScaleSetDeletingReason)) }, @@ -1628,9 +1626,9 @@ func TestMachinePoolScope_setProvisioningStateAndConditions(t *testing.T) { }, { Name: "if provisioning state is set to Failed, MachinePool ready state is not adjusted, and scale set running condition is set to Failed", - Setup: func(mp *expv1.MachinePool, amp 
*infrav1exp.AzureMachinePool, cb *fake.ClientBuilder) {}, + Setup: func(mp *clusterv1beta1.MachinePool, amp *infrav1exp.AzureMachinePool, cb *fake.ClientBuilder) {}, Verify: func(g *WithT, amp *infrav1exp.AzureMachinePool, c client.Client) { - condition := conditions.Get(amp, infrav1.ScaleSetRunningCondition) + condition := v1beta1conditions.Get(amp, infrav1.ScaleSetRunningCondition) g.Expect(condition.Status).To(Equal(corev1.ConditionFalse)) g.Expect(condition.Reason).To(Equal(infrav1.ScaleSetProvisionFailedReason)) }, @@ -1638,9 +1636,9 @@ func TestMachinePoolScope_setProvisioningStateAndConditions(t *testing.T) { }, { Name: "if provisioning state is set to something not explicitly handled, MachinePool ready state is not adjusted, and scale set running condition is set to the ProvisioningState", - Setup: func(mp *expv1.MachinePool, amp *infrav1exp.AzureMachinePool, cb *fake.ClientBuilder) {}, + Setup: func(mp *clusterv1beta1.MachinePool, amp *infrav1exp.AzureMachinePool, cb *fake.ClientBuilder) {}, Verify: func(g *WithT, amp *infrav1exp.AzureMachinePool, c client.Client) { - condition := conditions.Get(amp, infrav1.ScaleSetRunningCondition) + condition := v1beta1conditions.Get(amp, infrav1.ScaleSetRunningCondition) g.Expect(condition.Status).To(Equal(corev1.ConditionFalse)) g.Expect(condition.Reason).To(Equal(string(infrav1.Migrating))) }, @@ -1653,21 +1651,21 @@ func TestMachinePoolScope_setProvisioningStateAndConditions(t *testing.T) { g = NewWithT(t) mockCtrl = gomock.NewController(t) cb = fake.NewClientBuilder().WithScheme(scheme) - cluster = &clusterv1.Cluster{ + cluster = &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: "default", }, - Spec: clusterv1.ClusterSpec{ + Spec: clusterv1beta1.ClusterSpec{ InfrastructureRef: &corev1.ObjectReference{ Name: "azCluster1", }, }, - Status: clusterv1.ClusterStatus{ + Status: clusterv1beta1.ClusterStatus{ InfrastructureReady: true, }, } - mp = &expv1.MachinePool{ + mp = &clusterv1beta1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "mp1", Namespace: "default", @@ -1675,7 +1673,7 @@ func TestMachinePoolScope_setProvisioningStateAndConditions(t *testing.T) { { Name: "cluster1", Kind: "Cluster", - APIVersion: clusterv1.GroupVersion.String(), + APIVersion: clusterv1beta1.GroupVersion.String(), }, }, }, @@ -1688,7 +1686,7 @@ func TestMachinePoolScope_setProvisioningStateAndConditions(t *testing.T) { { Name: "mp1", Kind: "MachinePool", - APIVersion: expv1.GroupVersion.String(), + APIVersion: clusterv1beta1.GroupVersion.String(), }, }, }, @@ -1717,7 +1715,7 @@ func TestBootstrapDataChanges(t *testing.T) { ctx, cancel := context.WithCancel(t.Context()) defer cancel() scheme := runtime.NewScheme() - _ = clusterv1.AddToScheme(scheme) + _ = clusterv1beta1.AddToScheme(scheme) _ = infrav1.AddToScheme(scheme) _ = infrav1exp.AddToScheme(scheme) _ = corev1.AddToScheme(scheme) @@ -1726,17 +1724,17 @@ func TestBootstrapDataChanges(t *testing.T) { g = NewWithT(t) mockCtrl = gomock.NewController(t) cb = fake.NewClientBuilder().WithScheme(scheme) - cluster = &clusterv1.Cluster{ + cluster = &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: "default", }, - Spec: clusterv1.ClusterSpec{ + Spec: clusterv1beta1.ClusterSpec{ InfrastructureRef: &corev1.ObjectReference{ Name: "azCluster1", }, }, - Status: clusterv1.ClusterStatus{ + Status: clusterv1beta1.ClusterStatus{ InfrastructureReady: true, }, } @@ -1751,7 +1749,7 @@ func TestBootstrapDataChanges(t *testing.T) { }, }, } - mp = &expv1.MachinePool{ 
+ mp = &clusterv1beta1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "mp1", Namespace: "default", @@ -1759,14 +1757,14 @@ func TestBootstrapDataChanges(t *testing.T) { { Name: "cluster1", Kind: "Cluster", - APIVersion: clusterv1.GroupVersion.String(), + APIVersion: clusterv1beta1.GroupVersion.String(), }, }, }, - Spec: expv1.MachinePoolSpec{ - Template: clusterv1.MachineTemplateSpec{ - Spec: clusterv1.MachineSpec{ - Bootstrap: clusterv1.Bootstrap{ + Spec: clusterv1beta1.MachinePoolSpec{ + Template: clusterv1beta1.MachineTemplateSpec{ + Spec: clusterv1beta1.MachineSpec{ + Bootstrap: clusterv1beta1.Bootstrap{ DataSecretName: ptr.To("mp-secret"), }, Version: ptr.To("v1.31.0"), @@ -1791,7 +1789,7 @@ func TestBootstrapDataChanges(t *testing.T) { { Name: "mp1", Kind: "MachinePool", - APIVersion: expv1.GroupVersion.String(), + APIVersion: clusterv1beta1.GroupVersion.String(), }, }, Annotations: map[string]string{ diff --git a/azure/scope/machinepoolmachine.go b/azure/scope/machinepoolmachine.go index dcc63de6a25..0256304d899 100644 --- a/azure/scope/machinepoolmachine.go +++ b/azure/scope/machinepoolmachine.go @@ -25,12 +25,11 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/controllers/noderefutil" "sigs.k8s.io/cluster-api/controllers/remote" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" - "sigs.k8s.io/cluster-api/util/patch" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" @@ -65,8 +64,8 @@ type ( AzureMachinePoolMachine *infrav1exp.AzureMachinePoolMachine Client client.Client ClusterScope azure.ClusterScoper - MachinePool *expv1.MachinePool - Machine *clusterv1.Machine + MachinePool *clusterv1beta1.MachinePool + Machine *clusterv1beta1.Machine // workloadNodeGetter is only used for testing purposes and provides a way for mocking requests to the workload cluster workloadNodeGetter nodeGetter @@ -77,11 +76,11 @@ type ( azure.ClusterScoper AzureMachinePoolMachine *infrav1exp.AzureMachinePoolMachine AzureMachinePool *infrav1exp.AzureMachinePool - MachinePool *expv1.MachinePool - Machine *clusterv1.Machine + MachinePool *clusterv1beta1.MachinePool + Machine *clusterv1beta1.Machine MachinePoolScope *MachinePoolScope client client.Client - patchHelper *patch.Helper + patchHelper *v1beta1patch.Helper instance *azure.VMSSVM // workloadNodeGetter is only used for testing purposes and provides a way for mocking requests to the workload cluster @@ -136,7 +135,7 @@ func NewMachinePoolMachineScope(params MachinePoolMachineScopeParams) (*MachineP return nil, errors.Wrap(err, "failed to build machine pool scope") } - helper, err := patch.NewHelper(params.AzureMachinePoolMachine, params.Client) + helper, err := v1beta1patch.NewHelper(params.AzureMachinePoolMachine, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init patch helper") } @@ -209,38 +208,38 @@ func (s *MachinePoolMachineScope) DeleteLongRunningOperationState(name, service, } // UpdateDeleteStatus updates a condition on the AzureMachinePoolMachine status after a DELETE operation. 
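// The patch helper wired up above mirrors machinepool.go; a hedged sketch of
// its lifecycle (obj stands in for any patchable resource, and the option list
// is abbreviated relative to the real call sites):
//
//	helper, err := v1beta1patch.NewHelper(obj, client) // was patch.NewHelper
//	if err != nil {
//		return errors.Wrap(err, "failed to init patch helper")
//	}
//	// ...mutate obj's spec/status/conditions...
//	return helper.Patch(ctx, obj,
//		v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{
//			clusterv1beta1.ReadyCondition,
//		}})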
-func (s *MachinePoolMachineScope) UpdateDeleteStatus(condition clusterv1.ConditionType, service string, err error) { +func (s *MachinePoolMachineScope) UpdateDeleteStatus(condition clusterv1beta1.ConditionType, service string, err error) { switch { case err == nil: - conditions.MarkFalse(s.AzureMachinePoolMachine, condition, infrav1.DeletedReason, clusterv1.ConditionSeverityInfo, "%s successfully deleted", service) + v1beta1conditions.MarkFalse(s.AzureMachinePoolMachine, condition, infrav1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "%s successfully deleted", service) case azure.IsOperationNotDoneError(err): - conditions.MarkFalse(s.AzureMachinePoolMachine, condition, infrav1.DeletingReason, clusterv1.ConditionSeverityInfo, "%s deleting", service) + v1beta1conditions.MarkFalse(s.AzureMachinePoolMachine, condition, infrav1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "%s deleting", service) default: - conditions.MarkFalse(s.AzureMachinePoolMachine, condition, infrav1.DeletionFailedReason, clusterv1.ConditionSeverityError, "%s failed to delete. err: %s", service, err.Error()) + v1beta1conditions.MarkFalse(s.AzureMachinePoolMachine, condition, infrav1.DeletionFailedReason, clusterv1beta1.ConditionSeverityError, "%s failed to delete. err: %s", service, err.Error()) } } // UpdatePutStatus updates a condition on the AzureMachinePoolMachine status after a PUT operation. -func (s *MachinePoolMachineScope) UpdatePutStatus(condition clusterv1.ConditionType, service string, err error) { +func (s *MachinePoolMachineScope) UpdatePutStatus(condition clusterv1beta1.ConditionType, service string, err error) { switch { case err == nil: - conditions.MarkTrue(s.AzureMachinePoolMachine, condition) + v1beta1conditions.MarkTrue(s.AzureMachinePoolMachine, condition) case azure.IsOperationNotDoneError(err): - conditions.MarkFalse(s.AzureMachinePoolMachine, condition, infrav1.CreatingReason, clusterv1.ConditionSeverityInfo, "%s creating or updating", service) + v1beta1conditions.MarkFalse(s.AzureMachinePoolMachine, condition, infrav1.CreatingReason, clusterv1beta1.ConditionSeverityInfo, "%s creating or updating", service) default: - conditions.MarkFalse(s.AzureMachinePoolMachine, condition, infrav1.FailedReason, clusterv1.ConditionSeverityError, "%s failed to create or update. err: %s", service, err.Error()) + v1beta1conditions.MarkFalse(s.AzureMachinePoolMachine, condition, infrav1.FailedReason, clusterv1beta1.ConditionSeverityError, "%s failed to create or update. err: %s", service, err.Error()) } } // UpdatePatchStatus updates a condition on the AzureMachinePoolMachine status after a PATCH operation. -func (s *MachinePoolMachineScope) UpdatePatchStatus(condition clusterv1.ConditionType, service string, err error) { +func (s *MachinePoolMachineScope) UpdatePatchStatus(condition clusterv1beta1.ConditionType, service string, err error) { switch { case err == nil: - conditions.MarkTrue(s.AzureMachinePoolMachine, condition) + v1beta1conditions.MarkTrue(s.AzureMachinePoolMachine, condition) case azure.IsOperationNotDoneError(err): - conditions.MarkFalse(s.AzureMachinePoolMachine, condition, infrav1.UpdatingReason, clusterv1.ConditionSeverityInfo, "%s updating", service) + v1beta1conditions.MarkFalse(s.AzureMachinePoolMachine, condition, infrav1.UpdatingReason, clusterv1beta1.ConditionSeverityInfo, "%s updating", service) default: - conditions.MarkFalse(s.AzureMachinePoolMachine, condition, infrav1.FailedReason, clusterv1.ConditionSeverityError, "%s failed to update. 
err: %s", service, err.Error()) + v1beta1conditions.MarkFalse(s.AzureMachinePoolMachine, condition, infrav1.FailedReason, clusterv1beta1.ConditionSeverityError, "%s failed to update. err: %s", service, err.Error()) } } @@ -285,29 +284,29 @@ func (s *MachinePoolMachineScope) ProviderID() string { return s.AzureMachinePoolMachine.Spec.ProviderID } -// updateDeleteMachineAnnotation sets the clusterv1.DeleteMachineAnnotation on the AzureMachinePoolMachine if it exists on the owner Machine. +// updateDeleteMachineAnnotation sets the clusterv1beta1.DeleteMachineAnnotation on the AzureMachinePoolMachine if it exists on the owner Machine. func (s *MachinePoolMachineScope) updateDeleteMachineAnnotation() { if s.Machine.Annotations != nil { - if _, ok := s.Machine.Annotations[clusterv1.DeleteMachineAnnotation]; ok { + if _, ok := s.Machine.Annotations[clusterv1beta1.DeleteMachineAnnotation]; ok { if s.AzureMachinePoolMachine.Annotations == nil { s.AzureMachinePoolMachine.Annotations = map[string]string{} } - s.AzureMachinePoolMachine.Annotations[clusterv1.DeleteMachineAnnotation] = "true" + s.AzureMachinePoolMachine.Annotations[clusterv1beta1.DeleteMachineAnnotation] = "true" } } } // PatchObject persists the MachinePoolMachine spec and status. func (s *MachinePoolMachineScope) PatchObject(ctx context.Context) error { - conditions.SetSummary(s.AzureMachinePoolMachine) + v1beta1conditions.SetSummary(s.AzureMachinePoolMachine) return s.patchHelper.Patch( ctx, s.AzureMachinePoolMachine, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ - clusterv1.ReadyCondition, - clusterv1.MachineNodeHealthyCondition, + v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ + clusterv1beta1.ReadyCondition, + clusterv1beta1.MachineNodeHealthyCondition, }}) } @@ -337,13 +336,13 @@ func (s *MachinePoolMachineScope) UpdateNodeStatus(ctx context.Context) error { if s.instance != nil { switch s.instance.BootstrappingState { case infrav1.Creating: - conditions.MarkFalse(s.AzureMachinePoolMachine, infrav1.BootstrapSucceededCondition, infrav1.BootstrapInProgressReason, clusterv1.ConditionSeverityInfo, "VM bootstrapping") + v1beta1conditions.MarkFalse(s.AzureMachinePoolMachine, infrav1.BootstrapSucceededCondition, infrav1.BootstrapInProgressReason, clusterv1beta1.ConditionSeverityInfo, "VM bootstrapping") case infrav1.Failed: log.Info("VM bootstrapping failed") - conditions.MarkFalse(s.AzureMachinePoolMachine, infrav1.BootstrapSucceededCondition, infrav1.BootstrapFailedReason, clusterv1.ConditionSeverityInfo, "VM bootstrapping failed") + v1beta1conditions.MarkFalse(s.AzureMachinePoolMachine, infrav1.BootstrapSucceededCondition, infrav1.BootstrapFailedReason, clusterv1beta1.ConditionSeverityInfo, "VM bootstrapping failed") case infrav1.Succeeded: log.Info("VM bootstrapping succeeded") - conditions.MarkTrue(s.AzureMachinePoolMachine, infrav1.BootstrapSucceededCondition) + v1beta1conditions.MarkTrue(s.AzureMachinePoolMachine, infrav1.BootstrapSucceededCondition) } } @@ -355,24 +354,24 @@ func (s *MachinePoolMachineScope) UpdateNodeStatus(ctx context.Context) error { switch { case err != nil && apierrors.IsNotFound(err) && nodeRef != nil && nodeRef.Name != "": // Node was not found due to 404 when finding by ObjectReference. 
- conditions.MarkFalse(s.AzureMachinePoolMachine, clusterv1.MachineNodeHealthyCondition, clusterv1.NodeNotFoundReason, clusterv1.ConditionSeverityError, "") + v1beta1conditions.MarkFalse(s.AzureMachinePoolMachine, clusterv1beta1.MachineNodeHealthyCondition, clusterv1beta1.NodeNotFoundReason, clusterv1beta1.ConditionSeverityError, "") case err != nil: // Failed due to an unexpected error return err case !found && s.ProviderID() == "": // Node was not found due to not having a providerID set - conditions.MarkFalse(s.AzureMachinePoolMachine, clusterv1.MachineNodeHealthyCondition, clusterv1.WaitingForNodeRefReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.AzureMachinePoolMachine, clusterv1beta1.MachineNodeHealthyCondition, clusterv1beta1.WaitingForNodeRefReason, clusterv1beta1.ConditionSeverityInfo, "") case !found && s.ProviderID() != "": // Node was not found due to not finding a matching node by providerID - conditions.MarkFalse(s.AzureMachinePoolMachine, clusterv1.MachineNodeHealthyCondition, clusterv1.NodeProvisioningReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.AzureMachinePoolMachine, clusterv1beta1.MachineNodeHealthyCondition, clusterv1beta1.NodeProvisioningReason, clusterv1beta1.ConditionSeverityInfo, "") default: // Node was found. Check if it is ready. nodeReady := noderefutil.IsNodeReady(node) s.AzureMachinePoolMachine.Status.Ready = nodeReady if nodeReady { - conditions.MarkTrue(s.AzureMachinePoolMachine, clusterv1.MachineNodeHealthyCondition) + v1beta1conditions.MarkTrue(s.AzureMachinePoolMachine, clusterv1beta1.MachineNodeHealthyCondition) } else { - conditions.MarkFalse(s.AzureMachinePoolMachine, clusterv1.MachineNodeHealthyCondition, clusterv1.NodeConditionsFailedReason, clusterv1.ConditionSeverityWarning, "") + v1beta1conditions.MarkFalse(s.AzureMachinePoolMachine, clusterv1beta1.MachineNodeHealthyCondition, clusterv1beta1.NodeConditionsFailedReason, clusterv1beta1.ConditionSeverityWarning, "") } s.AzureMachinePoolMachine.Status.NodeRef = &corev1.ObjectReference{ diff --git a/azure/scope/machinepoolmachine_test.go b/azure/scope/machinepoolmachine_test.go index 2be064caceb..9b84c3602f4 100644 --- a/azure/scope/machinepoolmachine_test.go +++ b/azure/scope/machinepoolmachine_test.go @@ -28,9 +28,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/controller-runtime/pkg/client/fake" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" @@ -48,7 +47,7 @@ const ( func TestNewMachinePoolMachineScope(t *testing.T) { scheme := runtime.NewScheme() - _ = expv1.AddToScheme(scheme) + _ = clusterv1beta1.AddToScheme(scheme) _ = infrav1exp.AddToScheme(scheme) cases := []struct { @@ -61,15 +60,15 @@ func TestNewMachinePoolMachineScope(t *testing.T) { Input: MachinePoolMachineScopeParams{ Client: fake.NewClientBuilder().WithScheme(scheme).Build(), ClusterScope: &ClusterScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "clusterName", }, }, }, - MachinePool: new(expv1.MachinePool), + MachinePool: new(clusterv1beta1.MachinePool), AzureMachinePool: new(infrav1exp.AzureMachinePool), - Machine: new(clusterv1.Machine), + 
Machine: new(clusterv1beta1.Machine), AzureMachinePoolMachine: new(infrav1exp.AzureMachinePoolMachine), }, }, @@ -77,9 +76,9 @@ func TestNewMachinePoolMachineScope(t *testing.T) { Name: "no client", Input: MachinePoolMachineScopeParams{ ClusterScope: new(ClusterScope), - MachinePool: new(expv1.MachinePool), + MachinePool: new(clusterv1beta1.MachinePool), AzureMachinePool: new(infrav1exp.AzureMachinePool), - Machine: new(clusterv1.Machine), + Machine: new(clusterv1beta1.Machine), AzureMachinePoolMachine: new(infrav1exp.AzureMachinePoolMachine), }, Err: "client is required when creating a MachinePoolScope", @@ -88,9 +87,9 @@ func TestNewMachinePoolMachineScope(t *testing.T) { Name: "no ClusterScope", Input: MachinePoolMachineScopeParams{ Client: fake.NewClientBuilder().WithScheme(scheme).Build(), - MachinePool: new(expv1.MachinePool), + MachinePool: new(clusterv1beta1.MachinePool), AzureMachinePool: new(infrav1exp.AzureMachinePool), - Machine: new(clusterv1.Machine), + Machine: new(clusterv1beta1.Machine), AzureMachinePoolMachine: new(infrav1exp.AzureMachinePoolMachine), }, Err: "cluster scope is required when creating a MachinePoolScope", @@ -101,7 +100,7 @@ func TestNewMachinePoolMachineScope(t *testing.T) { Client: fake.NewClientBuilder().WithScheme(scheme).Build(), ClusterScope: new(ClusterScope), AzureMachinePool: new(infrav1exp.AzureMachinePool), - Machine: new(clusterv1.Machine), + Machine: new(clusterv1beta1.Machine), AzureMachinePoolMachine: new(infrav1exp.AzureMachinePoolMachine), }, Err: "machine pool is required when creating a MachinePoolScope", @@ -111,8 +110,8 @@ func TestNewMachinePoolMachineScope(t *testing.T) { Input: MachinePoolMachineScopeParams{ Client: fake.NewClientBuilder().WithScheme(scheme).Build(), ClusterScope: new(ClusterScope), - MachinePool: new(expv1.MachinePool), - Machine: new(clusterv1.Machine), + MachinePool: new(clusterv1beta1.MachinePool), + Machine: new(clusterv1beta1.Machine), AzureMachinePoolMachine: new(infrav1exp.AzureMachinePoolMachine), }, Err: "azure machine pool is required when creating a MachinePoolScope", @@ -122,8 +121,8 @@ func TestNewMachinePoolMachineScope(t *testing.T) { Input: MachinePoolMachineScopeParams{ Client: fake.NewClientBuilder().WithScheme(scheme).Build(), ClusterScope: new(ClusterScope), - MachinePool: new(expv1.MachinePool), - Machine: new(clusterv1.Machine), + MachinePool: new(clusterv1beta1.MachinePool), + Machine: new(clusterv1beta1.Machine), AzureMachinePool: new(infrav1exp.AzureMachinePool), }, Err: "azure machine pool machine is required when creating a MachinePoolScope", @@ -133,7 +132,7 @@ func TestNewMachinePoolMachineScope(t *testing.T) { Input: MachinePoolMachineScopeParams{ Client: fake.NewClientBuilder().WithScheme(scheme).Build(), ClusterScope: new(ClusterScope), - MachinePool: new(expv1.MachinePool), + MachinePool: new(clusterv1beta1.MachinePool), AzureMachinePool: new(infrav1exp.AzureMachinePool), AzureMachinePoolMachine: new(infrav1exp.AzureMachinePoolMachine), }, @@ -164,7 +163,7 @@ func TestMachinePoolMachineScope_ScaleSetVMSpecs(t *testing.T) { { name: "return vmss vm spec for uniform vmss", machinePoolMachineScope: MachinePoolMachineScope{ - MachinePool: &expv1.MachinePool{}, + MachinePool: &clusterv1beta1.MachinePool{}, AzureMachinePool: &infrav1exp.AzureMachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "machinepool-name", @@ -215,7 +214,7 @@ func TestMachinePoolMachineScope_ScaleSetVMSpecs(t *testing.T) { { name: "return vmss vm spec for vmss flex", machinePoolMachineScope: MachinePoolMachineScope{ - 
MachinePool: &expv1.MachinePool{}, + MachinePool: &clusterv1beta1.MachinePool{}, AzureMachinePool: &infrav1exp.AzureMachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "machinepool-name", @@ -275,15 +274,15 @@ func TestMachinePoolMachineScope_ScaleSetVMSpecs(t *testing.T) { func TestMachineScope_updateDeleteMachineAnnotation(t *testing.T) { cases := []struct { name string - machine clusterv1.Machine + machine clusterv1beta1.Machine ampm infrav1exp.AzureMachinePoolMachine }{ { name: "add annotation to ampm", - machine: clusterv1.Machine{ + machine: clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - clusterv1.DeleteMachineAnnotation: "true", + clusterv1beta1.DeleteMachineAnnotation: "true", }, }, }, @@ -291,12 +290,12 @@ func TestMachineScope_updateDeleteMachineAnnotation(t *testing.T) { }, { name: "do not add annotation to ampm when machine annotations are nil", - machine: clusterv1.Machine{}, + machine: clusterv1beta1.Machine{}, ampm: infrav1exp.AzureMachinePoolMachine{}, }, { name: "do not add annotation to ampm", - machine: clusterv1.Machine{ + machine: clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{}, }, @@ -315,8 +314,8 @@ func TestMachineScope_updateDeleteMachineAnnotation(t *testing.T) { } machineScope.updateDeleteMachineAnnotation() - _, machineHasAnnotation := machineScope.Machine.Annotations[clusterv1.DeleteMachineAnnotation] - _, ampmHasAnnotation := machineScope.AzureMachinePoolMachine.Annotations[clusterv1.DeleteMachineAnnotation] + _, machineHasAnnotation := machineScope.Machine.Annotations[clusterv1beta1.DeleteMachineAnnotation] + _, ampmHasAnnotation := machineScope.AzureMachinePoolMachine.Annotations[clusterv1beta1.DeleteMachineAnnotation] g.Expect(machineHasAnnotation).To(Equal(ampmHasAnnotation)) }) } @@ -324,7 +323,7 @@ func TestMachineScope_updateDeleteMachineAnnotation(t *testing.T) { func TestMachineScope_UpdateNodeStatus(t *testing.T) { scheme := runtime.NewScheme() - _ = expv1.AddToScheme(scheme) + _ = clusterv1beta1.AddToScheme(scheme) _ = infrav1exp.AddToScheme(scheme) mockCtrl := gomock.NewController(t) @@ -354,7 +353,7 @@ func TestMachineScope_UpdateNodeStatus(t *testing.T) { g.Expect(scope.AzureMachinePoolMachine.Status.NodeRef).To(Equal(&corev1.ObjectReference{ Name: "node1", })) - assertCondition(t, scope.AzureMachinePoolMachine, conditions.TrueCondition(clusterv1.MachineNodeHealthyCondition)) + assertCondition(t, scope.AzureMachinePoolMachine, v1beta1conditions.TrueCondition(clusterv1beta1.MachineNodeHealthyCondition)) }, }, { @@ -369,7 +368,7 @@ func TestMachineScope_UpdateNodeStatus(t *testing.T) { g.Expect(scope.AzureMachinePoolMachine.Status.NodeRef).To(Equal(&corev1.ObjectReference{ Name: "node1", })) - assertCondition(t, scope.AzureMachinePoolMachine, conditions.FalseCondition(clusterv1.MachineNodeHealthyCondition, clusterv1.NodeConditionsFailedReason, clusterv1.ConditionSeverityWarning, "")) + assertCondition(t, scope.AzureMachinePoolMachine, v1beta1conditions.FalseCondition(clusterv1beta1.MachineNodeHealthyCondition, clusterv1beta1.NodeConditionsFailedReason, clusterv1beta1.ConditionSeverityWarning, "")) }, }, { @@ -387,7 +386,7 @@ func TestMachineScope_UpdateNodeStatus(t *testing.T) { return nil, ampm }, Verify: func(g *WithT, scope *MachinePoolMachineScope) { - assertCondition(t, scope.AzureMachinePoolMachine, conditions.FalseCondition(clusterv1.MachineNodeHealthyCondition, clusterv1.NodeProvisioningReason, clusterv1.ConditionSeverityInfo, "")) + assertCondition(t, 
scope.AzureMachinePoolMachine, v1beta1conditions.FalseCondition(clusterv1beta1.MachineNodeHealthyCondition, clusterv1beta1.NodeProvisioningReason, clusterv1beta1.ConditionSeverityInfo, "")) }, }, { @@ -406,7 +405,7 @@ func TestMachineScope_UpdateNodeStatus(t *testing.T) { g.Expect(scope.AzureMachinePoolMachine.Status.NodeRef).To(Equal(&corev1.ObjectReference{ Name: "node1", })) - assertCondition(t, scope.AzureMachinePoolMachine, conditions.TrueCondition(clusterv1.MachineNodeHealthyCondition)) + assertCondition(t, scope.AzureMachinePoolMachine, v1beta1conditions.TrueCondition(clusterv1beta1.MachineNodeHealthyCondition)) }, }, } @@ -420,17 +419,17 @@ func TestMachineScope_UpdateNodeStatus(t *testing.T) { params = MachinePoolMachineScopeParams{ Client: fake.NewClientBuilder().WithScheme(scheme).Build(), ClusterScope: clusterScope, - MachinePool: &expv1.MachinePool{ - Spec: expv1.MachinePoolSpec{ - Template: clusterv1.MachineTemplateSpec{ - Spec: clusterv1.MachineSpec{ + MachinePool: &clusterv1beta1.MachinePool{ + Spec: clusterv1beta1.MachinePoolSpec{ + Template: clusterv1beta1.MachineTemplateSpec{ + Spec: clusterv1beta1.MachineSpec{ Version: ptr.To("v1.19.11"), }, }, }, }, AzureMachinePool: new(infrav1exp.AzureMachinePool), - Machine: new(clusterv1.Machine), + Machine: new(clusterv1beta1.Machine), } ) @@ -503,16 +502,16 @@ func getNotReadyNode() *corev1.Node { // asserts whether a condition of type is set on the Getter object // when the condition is true, asserting the reason/severity/message // for the condition are avoided. -func assertCondition(t *testing.T, from conditions.Getter, condition *clusterv1.Condition) { +func assertCondition(t *testing.T, from v1beta1conditions.Getter, condition *clusterv1beta1.Condition) { t.Helper() g := NewWithT(t) - g.Expect(conditions.Has(from, condition.Type)).To(BeTrue()) + g.Expect(v1beta1conditions.Has(from, condition.Type)).To(BeTrue()) if condition.Status == corev1.ConditionTrue { - conditions.IsTrue(from, condition.Type) + v1beta1conditions.IsTrue(from, condition.Type) } else { - conditionToBeAsserted := conditions.Get(from, condition.Type) + conditionToBeAsserted := v1beta1conditions.Get(from, condition.Type) g.Expect(conditionToBeAsserted.Status).To(Equal(condition.Status)) g.Expect(conditionToBeAsserted.Severity).To(Equal(condition.Severity)) g.Expect(conditionToBeAsserted.Reason).To(Equal(condition.Reason)) diff --git a/azure/scope/managedcontrolplane.go b/azure/scope/managedcontrolplane.go index 0956cf6478a..a9ed35037ce 100644 --- a/azure/scope/managedcontrolplane.go +++ b/azure/scope/managedcontrolplane.go @@ -38,10 +38,10 @@ import ( clientcmdapi "k8s.io/client-go/tools/clientcmd/api" bootstrapapi "k8s.io/cluster-bootstrap/token/api" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/controllers/remote" - "sigs.k8s.io/cluster-api/util/conditions" - "sigs.k8s.io/cluster-api/util/patch" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" "sigs.k8s.io/cluster-api/util/secret" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -70,7 +70,7 @@ const ( type ManagedControlPlaneScopeParams struct { AzureClients Client client.Client - Cluster *clusterv1.Cluster + Cluster *clusterv1beta1.Cluster ControlPlane *infrav1.AzureManagedControlPlane ManagedMachinePools []ManagedMachinePool Cache 
*ManagedControlPlaneCache @@ -105,7 +105,7 @@ func NewManagedControlPlaneScope(ctx context.Context, params ManagedControlPlane params.Cache = &ManagedControlPlaneCache{} } - helper, err := patch.NewHelper(params.ControlPlane, params.Client) + helper, err := v1beta1patch.NewHelper(params.ControlPlane, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init patch helper") } @@ -125,13 +125,13 @@ func NewManagedControlPlaneScope(ctx context.Context, params ManagedControlPlane // ManagedControlPlaneScope defines the basic context for an actuator to operate upon. type ManagedControlPlaneScope struct { Client client.Client - PatchHelper *patch.Helper + PatchHelper *v1beta1patch.Helper adminKubeConfigData []byte userKubeConfigData []byte cache *ManagedControlPlaneCache AzureClients - Cluster *clusterv1.Cluster + Cluster *clusterv1beta1.Cluster ControlPlane *infrav1.AzureManagedControlPlane ManagedMachinePools []ManagedMachinePool azure.AsyncReconciler @@ -235,13 +235,13 @@ func (s *ManagedControlPlaneScope) PatchObject(ctx context.Context) error { ctx, _, done := tele.StartSpanWithLogger(ctx, "scope.ManagedControlPlaneScope.PatchObject") defer done() - conditions.SetSummary(s.ControlPlane) + v1beta1conditions.SetSummary(s.ControlPlane) return s.PatchHelper.Patch( ctx, s.ControlPlane, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ - clusterv1.ReadyCondition, + v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ + clusterv1beta1.ReadyCondition, infrav1.ResourceGroupReadyCondition, infrav1.VNetReadyCondition, infrav1.SubnetsReadyCondition, @@ -759,7 +759,7 @@ func (s *ManagedControlPlaneScope) GetAllAgentPoolSpecs() ([]azure.ASOResourceSp } // SetControlPlaneEndpoint sets a control plane endpoint. -func (s *ManagedControlPlaneScope) SetControlPlaneEndpoint(endpoint clusterv1.APIEndpoint) { +func (s *ManagedControlPlaneScope) SetControlPlaneEndpoint(endpoint clusterv1beta1.APIEndpoint) { s.ControlPlane.Spec.ControlPlaneEndpoint.Host = endpoint.Host s.ControlPlane.Spec.ControlPlaneEndpoint.Port = endpoint.Port } @@ -773,7 +773,7 @@ func (s *ManagedControlPlaneScope) MakeEmptyKubeConfigSecret() corev1.Secret { OwnerReferences: []metav1.OwnerReference{ *metav1.NewControllerRef(s.ControlPlane, infrav1.GroupVersion.WithKind(infrav1.AzureManagedControlPlaneKind)), }, - Labels: map[string]string{clusterv1.ClusterNameLabel: s.Cluster.Name}, + Labels: map[string]string{clusterv1beta1.ClusterNameLabel: s.Cluster.Name}, }, } } @@ -873,38 +873,38 @@ func (s *ManagedControlPlaneScope) DeleteLongRunningOperationState(name, service } // UpdateDeleteStatus updates a condition on the AzureManagedControlPlane status after a DELETE operation. 
-func (s *ManagedControlPlaneScope) UpdateDeleteStatus(condition clusterv1.ConditionType, service string, err error) { +func (s *ManagedControlPlaneScope) UpdateDeleteStatus(condition clusterv1beta1.ConditionType, service string, err error) { switch { case err == nil: - conditions.MarkFalse(s.ControlPlane, condition, infrav1.DeletedReason, clusterv1.ConditionSeverityInfo, "%s successfully deleted", service) + v1beta1conditions.MarkFalse(s.ControlPlane, condition, infrav1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "%s successfully deleted", service) case azure.IsOperationNotDoneError(err): - conditions.MarkFalse(s.ControlPlane, condition, infrav1.DeletingReason, clusterv1.ConditionSeverityInfo, "%s deleting", service) + v1beta1conditions.MarkFalse(s.ControlPlane, condition, infrav1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "%s deleting", service) default: - conditions.MarkFalse(s.ControlPlane, condition, infrav1.DeletionFailedReason, clusterv1.ConditionSeverityError, "%s failed to delete. err: %s", service, err.Error()) + v1beta1conditions.MarkFalse(s.ControlPlane, condition, infrav1.DeletionFailedReason, clusterv1beta1.ConditionSeverityError, "%s failed to delete. err: %s", service, err.Error()) } } // UpdatePutStatus updates a condition on the AzureManagedControlPlane status after a PUT operation. -func (s *ManagedControlPlaneScope) UpdatePutStatus(condition clusterv1.ConditionType, service string, err error) { +func (s *ManagedControlPlaneScope) UpdatePutStatus(condition clusterv1beta1.ConditionType, service string, err error) { switch { case err == nil: - conditions.MarkTrue(s.ControlPlane, condition) + v1beta1conditions.MarkTrue(s.ControlPlane, condition) case azure.IsOperationNotDoneError(err): - conditions.MarkFalse(s.ControlPlane, condition, infrav1.CreatingReason, clusterv1.ConditionSeverityInfo, "%s creating or updating", service) + v1beta1conditions.MarkFalse(s.ControlPlane, condition, infrav1.CreatingReason, clusterv1beta1.ConditionSeverityInfo, "%s creating or updating", service) default: - conditions.MarkFalse(s.ControlPlane, condition, infrav1.FailedReason, clusterv1.ConditionSeverityError, "%s failed to create or update. err: %s", service, err.Error()) + v1beta1conditions.MarkFalse(s.ControlPlane, condition, infrav1.FailedReason, clusterv1beta1.ConditionSeverityError, "%s failed to create or update. err: %s", service, err.Error()) } } // UpdatePatchStatus updates a condition on the AzureManagedControlPlane status after a PATCH operation. -func (s *ManagedControlPlaneScope) UpdatePatchStatus(condition clusterv1.ConditionType, service string, err error) { +func (s *ManagedControlPlaneScope) UpdatePatchStatus(condition clusterv1beta1.ConditionType, service string, err error) { switch { case err == nil: - conditions.MarkTrue(s.ControlPlane, condition) + v1beta1conditions.MarkTrue(s.ControlPlane, condition) case azure.IsOperationNotDoneError(err): - conditions.MarkFalse(s.ControlPlane, condition, infrav1.UpdatingReason, clusterv1.ConditionSeverityInfo, "%s updating", service) + v1beta1conditions.MarkFalse(s.ControlPlane, condition, infrav1.UpdatingReason, clusterv1beta1.ConditionSeverityInfo, "%s updating", service) default: - conditions.MarkFalse(s.ControlPlane, condition, infrav1.FailedReason, clusterv1.ConditionSeverityError, "%s failed to update. err: %s", service, err.Error()) + v1beta1conditions.MarkFalse(s.ControlPlane, condition, infrav1.FailedReason, clusterv1beta1.ConditionSeverityError, "%s failed to update. 
err: %s", service, err.Error()) } } @@ -944,7 +944,7 @@ func (s *ManagedControlPlaneScope) SetAnnotation(key, value string) { } // AvailabilityStatusResource refers to the AzureManagedControlPlane. -func (s *ManagedControlPlaneScope) AvailabilityStatusResource() conditions.Setter { +func (s *ManagedControlPlaneScope) AvailabilityStatusResource() v1beta1conditions.Setter { return s.ControlPlane } @@ -955,10 +955,10 @@ func (s *ManagedControlPlaneScope) AvailabilityStatusResourceURI() string { // AvailabilityStatusFilter ignores the health metrics connection error that // occurs on startup for every AKS cluster. -func (s *ManagedControlPlaneScope) AvailabilityStatusFilter(cond *clusterv1.Condition) *clusterv1.Condition { +func (s *ManagedControlPlaneScope) AvailabilityStatusFilter(cond *clusterv1beta1.Condition) *clusterv1beta1.Condition { if time.Since(s.ControlPlane.CreationTimestamp.Time) < resourceHealthWarningInitialGracePeriod && - cond.Severity == clusterv1.ConditionSeverityWarning { - return conditions.TrueCondition(infrav1.AzureResourceAvailableCondition) + cond.Severity == clusterv1beta1.ConditionSeverityWarning { + return v1beta1conditions.TrueCondition(infrav1.AzureResourceAvailableCondition) } return cond } diff --git a/azure/scope/managedcontrolplane_test.go b/azure/scope/managedcontrolplane_test.go index d4ec2b5a4cc..832a2b97b5d 100644 --- a/azure/scope/managedcontrolplane_test.go +++ b/azure/scope/managedcontrolplane_test.go @@ -29,8 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client/fake" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" @@ -46,12 +45,12 @@ func TestNewManagedControlPlaneScope(t *testing.T) { g := NewWithT(t) scheme := runtime.NewScheme() - _ = expv1.AddToScheme(scheme) + _ = clusterv1beta1.AddToScheme(scheme) _ = infrav1.AddToScheme(scheme) _ = corev1.AddToScheme(scheme) input := ManagedControlPlaneScopeParams{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: "default", @@ -101,7 +100,7 @@ func TestManagedControlPlaneScope_OutboundType(t *testing.T) { { Name: "With Explicit OutboundType defined", Scope: &ManagedControlPlaneScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: "default", @@ -120,7 +119,7 @@ func TestManagedControlPlaneScope_OutboundType(t *testing.T) { { Name: "Without OutboundType defined", Scope: &ManagedControlPlaneScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: "default", @@ -156,7 +155,7 @@ func TestManagedControlPlaneScope_PoolVersion(t *testing.T) { { Name: "Without Version", Scope: &ManagedControlPlaneScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: "default", @@ -195,7 +194,7 @@ func TestManagedControlPlaneScope_PoolVersion(t *testing.T) { { Name: "With Version", Scope: &ManagedControlPlaneScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: "default", @@ -236,7 +235,7 @@ func TestManagedControlPlaneScope_PoolVersion(t *testing.T) { { Name: "With bad 
version", Scope: &ManagedControlPlaneScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: "default", @@ -287,7 +286,7 @@ func TestManagedControlPlaneScope_AddonProfiles(t *testing.T) { { Name: "Without add-ons", Scope: &ManagedControlPlaneScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: "default", @@ -312,7 +311,7 @@ func TestManagedControlPlaneScope_AddonProfiles(t *testing.T) { { Name: "With add-ons", Scope: &ManagedControlPlaneScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: "default", @@ -365,7 +364,7 @@ func TestManagedControlPlaneScope_OSType(t *testing.T) { { Name: "with Linux and Windows pools", Scope: &ManagedControlPlaneScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: "default", @@ -433,7 +432,7 @@ func TestManagedControlPlaneScope_OSType(t *testing.T) { { Name: "system pool required", Scope: &ManagedControlPlaneScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: "default", @@ -481,7 +480,7 @@ func TestManagedControlPlaneScope_OSType(t *testing.T) { func TestManagedControlPlaneScope_IsVnetManagedCache(t *testing.T) { scheme := runtime.NewScheme() - _ = expv1.AddToScheme(scheme) + _ = clusterv1beta1.AddToScheme(scheme) _ = infrav1.AddToScheme(scheme) _ = corev1.AddToScheme(scheme) @@ -493,7 +492,7 @@ func TestManagedControlPlaneScope_IsVnetManagedCache(t *testing.T) { { Name: "no Cache value", Scope: &ManagedControlPlaneScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: "default", @@ -535,7 +534,7 @@ func TestManagedControlPlaneScope_IsVnetManagedCache(t *testing.T) { { Name: "with Cache value of true", Scope: &ManagedControlPlaneScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: "default", @@ -577,7 +576,7 @@ func TestManagedControlPlaneScope_IsVnetManagedCache(t *testing.T) { { Name: "with Cache value of false", Scope: &ManagedControlPlaneScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: "default", @@ -640,7 +639,7 @@ func TestManagedControlPlaneScope_AADProfile(t *testing.T) { { Name: "Without AADProfile", Scope: &ManagedControlPlaneScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: "default", @@ -665,7 +664,7 @@ func TestManagedControlPlaneScope_AADProfile(t *testing.T) { { Name: "With AADProfile", Scope: &ManagedControlPlaneScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: "default", @@ -719,7 +718,7 @@ func TestManagedControlPlaneScope_DisableLocalAccounts(t *testing.T) { { Name: "Without DisableLocalAccounts", Scope: &ManagedControlPlaneScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: "default", @@ -744,7 +743,7 @@ func TestManagedControlPlaneScope_DisableLocalAccounts(t *testing.T) { { Name: "Without AAdProfile and With DisableLocalAccounts", Scope: 
&ManagedControlPlaneScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: "default", @@ -773,7 +772,7 @@ func TestManagedControlPlaneScope_DisableLocalAccounts(t *testing.T) { { Name: "With AAdProfile and With DisableLocalAccounts", Scope: &ManagedControlPlaneScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: "default", @@ -921,7 +920,7 @@ func TestManagedControlPlaneScope_PrivateEndpointSpecs(t *testing.T) { { Name: "returns empty private endpoints list if no subnets are specified", Input: ManagedControlPlaneScopeParams{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: "default", @@ -945,7 +944,7 @@ func TestManagedControlPlaneScope_PrivateEndpointSpecs(t *testing.T) { { Name: "returns empty private endpoints list if no private endpoints are specified", Input: ManagedControlPlaneScopeParams{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: "default", @@ -975,7 +974,7 @@ func TestManagedControlPlaneScope_PrivateEndpointSpecs(t *testing.T) { { Name: "returns list of private endpoint specs if private endpoints are specified", Input: ManagedControlPlaneScopeParams{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", Namespace: "dummy-ns", @@ -1083,7 +1082,7 @@ func TestManagedControlPlaneScope_AKSExtensionSpecs(t *testing.T) { { Name: "returns empty AKS extensions list if no extensions are specified", Input: ManagedControlPlaneScopeParams{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: "dummy-ns", @@ -1103,7 +1102,7 @@ func TestManagedControlPlaneScope_AKSExtensionSpecs(t *testing.T) { { Name: "returns list of AKS extensions if extensions are specified", Input: ManagedControlPlaneScopeParams{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", Namespace: "dummy-ns", @@ -1185,7 +1184,7 @@ func TestManagedControlPlaneScope_AutoUpgradeProfile(t *testing.T) { { name: "Without AutoUpgradeProfile", input: ManagedControlPlaneScopeParams{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: "default", @@ -1214,7 +1213,7 @@ func TestManagedControlPlaneScope_AutoUpgradeProfile(t *testing.T) { { name: "With AutoUpgradeProfile UpgradeChannelNodeImage", input: ManagedControlPlaneScopeParams{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: "default", @@ -1270,7 +1269,7 @@ func TestManagedControlPlaneScope_GroupSpecs(t *testing.T) { { name: "virtualNetwork belongs to a different resource group", input: ManagedControlPlaneScopeParams{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", }, @@ -1306,7 +1305,7 @@ func TestManagedControlPlaneScope_GroupSpecs(t *testing.T) { { name: "virtualNetwork belongs to a same resource group", input: ManagedControlPlaneScopeParams{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", }, @@ -1335,7 +1334,7 @@ func TestManagedControlPlaneScope_GroupSpecs(t *testing.T) { { name: 
"virtualNetwork resource group not specified", input: ManagedControlPlaneScopeParams{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: "default", @@ -1365,7 +1364,7 @@ func TestManagedControlPlaneScope_GroupSpecs(t *testing.T) { { name: "virtualNetwork belongs to different resource group with non-k8s name", input: ManagedControlPlaneScopeParams{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: "default", diff --git a/azure/scope/managedmachinepool.go b/azure/scope/managedmachinepool.go index f6093c63654..1683c1d4d2a 100644 --- a/azure/scope/managedmachinepool.go +++ b/azure/scope/managedmachinepool.go @@ -24,10 +24,9 @@ import ( "github.com/Azure/azure-service-operator/v2/pkg/genruntime" "github.com/pkg/errors" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" - "sigs.k8s.io/cluster-api/util/patch" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" @@ -43,7 +42,7 @@ import ( type ManagedMachinePoolScopeParams struct { ManagedMachinePool Client client.Client - Cluster *clusterv1.Cluster + Cluster *clusterv1beta1.Cluster ControlPlane *infrav1.AzureManagedControlPlane ManagedControlPlaneScope azure.ManagedClusterScoper } @@ -51,7 +50,7 @@ type ManagedMachinePoolScopeParams struct { // ManagedMachinePool defines the scope interface for a managed machine pool. type ManagedMachinePool struct { InfraMachinePool *infrav1.AzureManagedMachinePool - MachinePool *expv1.MachinePool + MachinePool *clusterv1beta1.MachinePool } // NewManagedMachinePoolScope creates a new Scope from the supplied parameters. @@ -68,12 +67,12 @@ func NewManagedMachinePoolScope(ctx context.Context, params ManagedMachinePoolSc return nil, errors.New("failed to generate new scope from nil ControlPlane") } - helper, err := patch.NewHelper(params.InfraMachinePool, params.Client) + helper, err := v1beta1patch.NewHelper(params.InfraMachinePool, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init patch helper") } - capiMachinePoolPatchHelper, err := patch.NewHelper(params.MachinePool, params.Client) + capiMachinePoolPatchHelper, err := v1beta1patch.NewHelper(params.MachinePool, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init patch helper") } @@ -93,12 +92,12 @@ func NewManagedMachinePoolScope(ctx context.Context, params ManagedMachinePoolSc // ManagedMachinePoolScope defines the basic context for an actuator to operate upon. 
type ManagedMachinePoolScope struct { Client client.Client - patchHelper *patch.Helper - capiMachinePoolPatchHelper *patch.Helper + patchHelper *v1beta1patch.Helper + capiMachinePoolPatchHelper *v1beta1patch.Helper azure.ManagedClusterScoper - Cluster *clusterv1.Cluster - MachinePool *expv1.MachinePool + Cluster *clusterv1beta1.Cluster + MachinePool *clusterv1beta1.MachinePool ControlPlane *infrav1.AzureManagedControlPlane InfraMachinePool *infrav1.AzureManagedMachinePool } @@ -108,13 +107,13 @@ func (s *ManagedMachinePoolScope) PatchObject(ctx context.Context) error { ctx, _, done := tele.StartSpanWithLogger(ctx, "scope.ManagedMachinePoolScope.PatchObject") defer done() - conditions.SetSummary(s.InfraMachinePool) + v1beta1conditions.SetSummary(s.InfraMachinePool) return s.patchHelper.Patch( ctx, s.InfraMachinePool, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ - clusterv1.ReadyCondition, + v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ + clusterv1beta1.ReadyCondition, }}) } @@ -159,7 +158,7 @@ func getAgentPoolSubnet(controlPlane *infrav1.AzureManagedControlPlane, infraMac } func buildAgentPoolSpec(managedControlPlane *infrav1.AzureManagedControlPlane, - machinePool *expv1.MachinePool, + machinePool *clusterv1beta1.MachinePool, managedMachinePool *infrav1.AzureManagedMachinePool) azure.ASOResourceSpecGetter[genruntime.MetaObject] { normalizedVersion := getManagedMachinePoolVersion(managedControlPlane, machinePool) @@ -282,38 +281,38 @@ func (s *ManagedMachinePoolScope) DeleteLongRunningOperationState(name, service, } // UpdateDeleteStatus updates a condition on the AzureManagedMachinePool status after a DELETE operation. -func (s *ManagedMachinePoolScope) UpdateDeleteStatus(condition clusterv1.ConditionType, service string, err error) { +func (s *ManagedMachinePoolScope) UpdateDeleteStatus(condition clusterv1beta1.ConditionType, service string, err error) { switch { case err == nil: - conditions.MarkFalse(s.InfraMachinePool, condition, infrav1.DeletedReason, clusterv1.ConditionSeverityInfo, "%s successfully deleted", service) + v1beta1conditions.MarkFalse(s.InfraMachinePool, condition, infrav1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "%s successfully deleted", service) case azure.IsOperationNotDoneError(err): - conditions.MarkFalse(s.InfraMachinePool, condition, infrav1.DeletingReason, clusterv1.ConditionSeverityInfo, "%s deleting", service) + v1beta1conditions.MarkFalse(s.InfraMachinePool, condition, infrav1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "%s deleting", service) default: - conditions.MarkFalse(s.InfraMachinePool, condition, infrav1.DeletionFailedReason, clusterv1.ConditionSeverityError, "%s failed to delete. err: %s", service, err.Error()) + v1beta1conditions.MarkFalse(s.InfraMachinePool, condition, infrav1.DeletionFailedReason, clusterv1beta1.ConditionSeverityError, "%s failed to delete. err: %s", service, err.Error()) } } // UpdatePutStatus updates a condition on the AzureManagedMachinePool status after a PUT operation.
-func (s *ManagedMachinePoolScope) UpdatePutStatus(condition clusterv1.ConditionType, service string, err error) { +func (s *ManagedMachinePoolScope) UpdatePutStatus(condition clusterv1beta1.ConditionType, service string, err error) { switch { case err == nil: - conditions.MarkTrue(s.InfraMachinePool, condition) + v1beta1conditions.MarkTrue(s.InfraMachinePool, condition) case azure.IsOperationNotDoneError(err): - conditions.MarkFalse(s.InfraMachinePool, condition, infrav1.CreatingReason, clusterv1.ConditionSeverityInfo, "%s creating or updating", service) + v1beta1conditions.MarkFalse(s.InfraMachinePool, condition, infrav1.CreatingReason, clusterv1beta1.ConditionSeverityInfo, "%s creating or updating", service) default: - conditions.MarkFalse(s.InfraMachinePool, condition, infrav1.FailedReason, clusterv1.ConditionSeverityError, "%s failed to create or update. err: %s", service, err.Error()) + v1beta1conditions.MarkFalse(s.InfraMachinePool, condition, infrav1.FailedReason, clusterv1beta1.ConditionSeverityError, "%s failed to create or update. err: %s", service, err.Error()) } } // UpdatePatchStatus updates a condition on the AzureManagedMachinePool status after a PATCH operation. -func (s *ManagedMachinePoolScope) UpdatePatchStatus(condition clusterv1.ConditionType, service string, err error) { +func (s *ManagedMachinePoolScope) UpdatePatchStatus(condition clusterv1beta1.ConditionType, service string, err error) { switch { case err == nil: - conditions.MarkTrue(s.InfraMachinePool, condition) + v1beta1conditions.MarkTrue(s.InfraMachinePool, condition) case azure.IsOperationNotDoneError(err): - conditions.MarkFalse(s.InfraMachinePool, condition, infrav1.UpdatingReason, clusterv1.ConditionSeverityInfo, "%s updating", service) + v1beta1conditions.MarkFalse(s.InfraMachinePool, condition, infrav1.UpdatingReason, clusterv1beta1.ConditionSeverityInfo, "%s updating", service) default: - conditions.MarkFalse(s.InfraMachinePool, condition, infrav1.FailedReason, clusterv1.ConditionSeverityError, "%s failed to update. err: %s", service, err.Error()) + v1beta1conditions.MarkFalse(s.InfraMachinePool, condition, infrav1.FailedReason, clusterv1beta1.ConditionSeverityError, "%s failed to update. 
err: %s", service, err.Error()) } } @@ -353,7 +352,7 @@ func (s *ManagedMachinePoolScope) GetCAPIMachinePoolAnnotation(key string) (succ return ok, val } -func getManagedMachinePoolVersion(managedControlPlane *infrav1.AzureManagedControlPlane, machinePool *expv1.MachinePool) *string { +func getManagedMachinePoolVersion(managedControlPlane *infrav1.AzureManagedControlPlane, machinePool *clusterv1beta1.MachinePool) *string { var v, av string if machinePool != nil { v = ptr.Deref(machinePool.Spec.Template.Spec.Version, "") diff --git a/azure/scope/managedmachinepool_test.go b/azure/scope/managedmachinepool_test.go index bfbba40de62..299d977a495 100644 --- a/azure/scope/managedmachinepool_test.go +++ b/azure/scope/managedmachinepool_test.go @@ -27,8 +27,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client/fake" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" @@ -38,11 +37,11 @@ import ( func TestNewManagedMachinePoolScope(t *testing.T) { scheme := runtime.NewScheme() - _ = expv1.AddToScheme(scheme) + _ = clusterv1beta1.AddToScheme(scheme) _ = infrav1.AddToScheme(scheme) input := ManagedMachinePoolScopeParams{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: "default", @@ -752,7 +751,7 @@ func TestManagedMachinePoolScope_EnablePreviewFeatures(t *testing.T) { }, }, }, - MachinePool: &expv1.MachinePool{}, + MachinePool: &clusterv1beta1.MachinePool{}, InfraMachinePool: &infrav1.AzureManagedMachinePool{}, } agentPoolGetter := s.AgentPoolSpec() @@ -768,7 +767,7 @@ func Test_getManagedMachinePoolVersion(t *testing.T) { cases := []struct { name string managedControlPlane *infrav1.AzureManagedControlPlane - machinePool *expv1.MachinePool + machinePool *clusterv1beta1.MachinePool expected *string }{ { @@ -786,10 +785,10 @@ func Test_getManagedMachinePoolVersion(t *testing.T) { { name: "Only machine pool is available", managedControlPlane: nil, - machinePool: &expv1.MachinePool{ - Spec: expv1.MachinePoolSpec{ - Template: clusterv1.MachineTemplateSpec{ - Spec: clusterv1.MachineSpec{ + machinePool: &clusterv1beta1.MachinePool{ + Spec: clusterv1beta1.MachinePoolSpec{ + Template: clusterv1beta1.MachineTemplateSpec{ + Spec: clusterv1beta1.MachineSpec{ Version: ptr.To("v1.15.0"), }, }, @@ -800,10 +799,10 @@ func Test_getManagedMachinePoolVersion(t *testing.T) { { name: "Only machine pool is available and cp is nil", managedControlPlane: nil, - machinePool: &expv1.MachinePool{ - Spec: expv1.MachinePoolSpec{ - Template: clusterv1.MachineTemplateSpec{ - Spec: clusterv1.MachineSpec{ + machinePool: &clusterv1beta1.MachinePool{ + Spec: clusterv1beta1.MachinePoolSpec{ + Template: clusterv1beta1.MachineTemplateSpec{ + Spec: clusterv1beta1.MachineSpec{ Version: ptr.To("v1.15.0"), }, }, @@ -818,10 +817,10 @@ func Test_getManagedMachinePoolVersion(t *testing.T) { AutoUpgradeVersion: "1.20.3", }, }, - machinePool: &expv1.MachinePool{ - Spec: expv1.MachinePoolSpec{ - Template: clusterv1.MachineTemplateSpec{ - Spec: clusterv1.MachineSpec{ + machinePool: &clusterv1beta1.MachinePool{ + Spec: clusterv1beta1.MachinePoolSpec{ + Template: clusterv1beta1.MachineTemplateSpec{ + Spec: clusterv1beta1.MachineSpec{ Version: ptr.To("v1.15.0"), }, }, @@ -836,10 +835,10 @@ func 
Test_getManagedMachinePoolVersion(t *testing.T) { AutoUpgradeVersion: "v1.20.3", }, }, - machinePool: &expv1.MachinePool{ - Spec: expv1.MachinePoolSpec{ - Template: clusterv1.MachineTemplateSpec{ - Spec: clusterv1.MachineSpec{ + machinePool: &clusterv1beta1.MachinePool{ + Spec: clusterv1beta1.MachinePoolSpec{ + Template: clusterv1beta1.MachineTemplateSpec{ + Spec: clusterv1beta1.MachineSpec{ Version: ptr.To("v1.15.0"), }, }, @@ -854,10 +853,10 @@ func Test_getManagedMachinePoolVersion(t *testing.T) { AutoUpgradeVersion: "v1.20.3", }, }, - machinePool: &expv1.MachinePool{ - Spec: expv1.MachinePoolSpec{ - Template: clusterv1.MachineTemplateSpec{ - Spec: clusterv1.MachineSpec{ + machinePool: &clusterv1beta1.MachinePool{ + Spec: clusterv1beta1.MachinePoolSpec{ + Template: clusterv1beta1.MachineTemplateSpec{ + Spec: clusterv1beta1.MachineSpec{ Version: ptr.To("v1.21.0"), }, }, @@ -886,7 +885,7 @@ func getAzureMachinePool(name string, mode infrav1.NodePoolMode) *infrav1.AzureM Name: name, Namespace: "default", Labels: map[string]string{ - clusterv1.ClusterNameLabel: "cluster1", + clusterv1beta1.ClusterNameLabel: "cluster1", }, OwnerReferences: []metav1.OwnerReference{ { @@ -957,16 +956,16 @@ func getAzureMachinePoolWithAdditionalTags(name string, additionalTags infrav1.T return managedPool } -func getMachinePool(name string) *expv1.MachinePool { - return &expv1.MachinePool{ +func getMachinePool(name string) *clusterv1beta1.MachinePool { + return &clusterv1beta1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: "default", Labels: map[string]string{ - clusterv1.ClusterNameLabel: "cluster1", + clusterv1beta1.ClusterNameLabel: "cluster1", }, }, - Spec: expv1.MachinePoolSpec{ + Spec: clusterv1beta1.MachinePoolSpec{ ClusterName: "cluster1", }, } @@ -984,7 +983,7 @@ func getWindowsAzureMachinePool(name string) *infrav1.AzureManagedMachinePool { return managedPool } -func getMachinePoolWithVersion(name, version string) *expv1.MachinePool { +func getMachinePoolWithVersion(name, version string) *clusterv1beta1.MachinePool { machine := getMachinePool(name) machine.Spec.Template.Spec.Version = ptr.To(version) return machine diff --git a/azure/scope/strategies/machinepool_deployments/machinepool_deployment_strategy.go b/azure/scope/strategies/machinepool_deployments/machinepool_deployment_strategy.go index d213060388b..aef444fabfb 100644 --- a/azure/scope/strategies/machinepool_deployments/machinepool_deployment_strategy.go +++ b/azure/scope/strategies/machinepool_deployments/machinepool_deployment_strategy.go @@ -24,7 +24,7 @@ import ( "github.com/pkg/errors" "k8s.io/apimachinery/pkg/util/intstr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ctrl "sigs.k8s.io/controller-runtime" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" @@ -145,7 +145,7 @@ func (rollingUpdateStrategy rollingUpdateStrategy) SelectMachinesToDelete(ctx co }() ) - // Order AzureMachinePoolMachines with the clusterv1.DeleteMachineAnnotation to the front so that they have delete priority. + // Order AzureMachinePoolMachines with the clusterv1beta1.DeleteMachineAnnotation to the front so that they have delete priority. // This allows MachinePool Machines to work with the autoscaler. 
failedMachines = orderByDeleteMachineAnnotation(failedMachines) deletingMachines = orderByDeleteMachineAnnotation(deletingMachines) @@ -235,7 +235,7 @@ func getDeleteAnnotatedMachines(machinesByProviderID map[string]infrav1exp.Azure var machines []infrav1exp.AzureMachinePoolMachine for _, v := range machinesByProviderID { if v.Annotations != nil { - if _, hasDeleteAnnotation := v.Annotations[clusterv1.DeleteMachineAnnotation]; hasDeleteAnnotation { + if _, hasDeleteAnnotation := v.Annotations[clusterv1beta1.DeleteMachineAnnotation]; hasDeleteAnnotation { machines = append(machines, v) } } @@ -323,11 +323,11 @@ func orderRandom(machines []infrav1exp.AzureMachinePoolMachine) []infrav1exp.Azu return machines } -// orderByDeleteMachineAnnotation will sort AzureMachinePoolMachines with the clusterv1.DeleteMachineAnnotation to the front of the list. +// orderByDeleteMachineAnnotation will sort AzureMachinePoolMachines with the clusterv1beta1.DeleteMachineAnnotation to the front of the list. // It otherwise preserves the existing order of the list, so that any pre-existing delete priority is respected. func orderByDeleteMachineAnnotation(machines []infrav1exp.AzureMachinePoolMachine) []infrav1exp.AzureMachinePoolMachine { sort.SliceStable(machines, func(i, _ int) bool { - _, iHasAnnotation := machines[i].Annotations[clusterv1.DeleteMachineAnnotation] + _, iHasAnnotation := machines[i].Annotations[clusterv1beta1.DeleteMachineAnnotation] return iHasAnnotation }) diff --git a/azure/scope/strategies/machinepool_deployments/machinepool_deployment_strategy_test.go b/azure/scope/strategies/machinepool_deployments/machinepool_deployment_strategy_test.go index e741c866a66..cf4df66ce2d 100644 --- a/azure/scope/strategies/machinepool_deployments/machinepool_deployment_strategy_test.go +++ b/azure/scope/strategies/machinepool_deployments/machinepool_deployment_strategy_test.go @@ -24,7 +24,7 @@ import ( "github.com/onsi/gomega/types" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" @@ -441,7 +441,7 @@ func makeAMPM(opts ampmOptions) infrav1exp.AzureMachinePoolMachine { } if opts.HasDeleteMachineAnnotation { - ampm.Annotations[clusterv1.DeleteMachineAnnotation] = "true" + ampm.Annotations[clusterv1beta1.DeleteMachineAnnotation] = "true" } return ampm diff --git a/azure/services/agentpools/agentpools.go b/azure/services/agentpools/agentpools.go index 4db6410a8bc..ce6058bd73d 100644 --- a/azure/services/agentpools/agentpools.go +++ b/azure/services/agentpools/agentpools.go @@ -22,7 +22,7 @@ import ( asocontainerservicev1hub "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20240901/storage" "github.com/Azure/azure-service-operator/v2/pkg/genruntime" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/controller-runtime/pkg/conversion" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" @@ -69,10 +69,10 @@ func postCreateOrUpdateResourceHook(_ context.Context, scope AgentPoolScope, obj // When autoscaling is set, add the annotation to the machine pool and update the replica count.
if ptr.Deref(agentPool.Status.EnableAutoScaling, false) { - scope.SetCAPIMachinePoolAnnotation(clusterv1.ReplicasManagedByAnnotation, "true") + scope.SetCAPIMachinePoolAnnotation(clusterv1beta1.ReplicasManagedByAnnotation, "true") scope.SetCAPIMachinePoolReplicas(agentPool.Status.Count) } else { // Otherwise, remove the annotation. - scope.RemoveCAPIMachinePoolAnnotation(clusterv1.ReplicasManagedByAnnotation) + scope.RemoveCAPIMachinePoolAnnotation(clusterv1beta1.ReplicasManagedByAnnotation) } return nil } diff --git a/azure/services/agentpools/agentpools_test.go b/azure/services/agentpools/agentpools_test.go index bcd82437422..e4964c52108 100644 --- a/azure/services/agentpools/agentpools_test.go +++ b/azure/services/agentpools/agentpools_test.go @@ -25,7 +25,7 @@ import ( "github.com/pkg/errors" "go.uber.org/mock/gomock" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure/services/agentpools/mock_agentpools" ) @@ -45,7 +45,7 @@ func TestPostCreateOrUpdateResourceHook(t *testing.T) { mockCtrl := gomock.NewController(t) scope := mock_agentpools.NewMockAgentPoolScope(mockCtrl) - scope.EXPECT().RemoveCAPIMachinePoolAnnotation(clusterv1.ReplicasManagedByAnnotation) + scope.EXPECT().RemoveCAPIMachinePoolAnnotation(clusterv1beta1.ReplicasManagedByAnnotation) managedCluster := &asocontainerservicev1.ManagedClustersAgentPool{ Status: asocontainerservicev1.ManagedClustersAgentPool_STATUS{ @@ -62,7 +62,7 @@ func TestPostCreateOrUpdateResourceHook(t *testing.T) { mockCtrl := gomock.NewController(t) scope := mock_agentpools.NewMockAgentPoolScope(mockCtrl) - scope.EXPECT().SetCAPIMachinePoolAnnotation(clusterv1.ReplicasManagedByAnnotation, "true") + scope.EXPECT().SetCAPIMachinePoolAnnotation(clusterv1beta1.ReplicasManagedByAnnotation, "true") scope.EXPECT().SetCAPIMachinePoolReplicas(ptr.To(1234)) managedCluster := &asocontainerservicev1.ManagedClustersAgentPool{ @@ -81,7 +81,7 @@ func TestPostCreateOrUpdateResourceHook(t *testing.T) { mockCtrl := gomock.NewController(t) scope := mock_agentpools.NewMockAgentPoolScope(mockCtrl) - scope.EXPECT().SetCAPIMachinePoolAnnotation(clusterv1.ReplicasManagedByAnnotation, "true") + scope.EXPECT().SetCAPIMachinePoolAnnotation(clusterv1beta1.ReplicasManagedByAnnotation, "true") scope.EXPECT().SetCAPIMachinePoolReplicas(ptr.To(1234)) agentPool := &asocontainerservicev1preview.ManagedClustersAgentPool{ diff --git a/azure/services/agentpools/mock_agentpools/agentpools_mock.go b/azure/services/agentpools/mock_agentpools/agentpools_mock.go index d5605eb18f9..38d29c83e5c 100644 --- a/azure/services/agentpools/mock_agentpools/agentpools_mock.go +++ b/azure/services/agentpools/mock_agentpools/agentpools_mock.go @@ -33,7 +33,7 @@ import ( gomock "go.uber.org/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" - v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1" + v1beta10 "sigs.k8s.io/cluster-api/api/core/v1beta1" client "sigs.k8s.io/controller-runtime/pkg/client" ) diff --git a/azure/services/aso/aso.go b/azure/services/aso/aso.go index 36e3b4ad322..ad99a38274b 100644 --- a/azure/services/aso/aso.go +++ b/azure/services/aso/aso.go @@ -33,7 +33,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/util/yaml" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" 
"sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -176,7 +176,7 @@ func (r *reconciler[T]) CreateOrUpdateResource(ctx context.Context, spec azure.A if labels == nil { labels = make(map[string]string) } - labels[clusterv1.ClusterNameLabel] = r.clusterName + labels[clusterv1beta1.ClusterNameLabel] = r.clusterName annotations := parameters.GetAnnotations() if annotations == nil { diff --git a/azure/services/aso/aso_test.go b/azure/services/aso/aso_test.go index 81f364549b7..b6e68d98e8f 100644 --- a/azure/services/aso/aso_test.go +++ b/azure/services/aso/aso_test.go @@ -30,7 +30,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -125,7 +125,7 @@ func TestCreateOrUpdateResource(t *testing.T) { Namespace: "namespace", OwnerReferences: ownerRefs(), Labels: map[string]string{ - clusterv1.ClusterNameLabel: clusterName, + clusterv1beta1.ClusterNameLabel: clusterName, }, }, Status: asoresourcesv1.ResourceGroup_STATUS{}, @@ -212,7 +212,7 @@ func TestCreateOrUpdateResource(t *testing.T) { Namespace: "namespace", OwnerReferences: ownerRefs(), Labels: map[string]string{ - clusterv1.ClusterNameLabel: clusterName, + clusterv1beta1.ClusterNameLabel: clusterName, }, Annotations: map[string]string{ asoannotations.PerResourceSecret: "cluster-aso-secret", @@ -268,7 +268,7 @@ func TestCreateOrUpdateResource(t *testing.T) { Namespace: "namespace", OwnerReferences: ownerRefs(), Labels: map[string]string{ - clusterv1.ClusterNameLabel: clusterName, + clusterv1beta1.ClusterNameLabel: clusterName, }, Annotations: map[string]string{ asoannotations.PerResourceSecret: "cluster-aso-secret", @@ -320,7 +320,7 @@ func TestCreateOrUpdateResource(t *testing.T) { Namespace: "namespace", OwnerReferences: ownerRefs(), Labels: map[string]string{ - clusterv1.ClusterNameLabel: clusterName, + clusterv1beta1.ClusterNameLabel: clusterName, }, Annotations: map[string]string{ asoannotations.PerResourceSecret: "cluster-aso-secret", @@ -402,7 +402,7 @@ func TestCreateOrUpdateResource(t *testing.T) { Namespace: "namespace", OwnerReferences: ownerRefs(), Labels: map[string]string{ - clusterv1.ClusterNameLabel: clusterName, + clusterv1beta1.ClusterNameLabel: clusterName, }, }, Status: asoresourcesv1.ResourceGroup_STATUS{ @@ -449,7 +449,7 @@ func TestCreateOrUpdateResource(t *testing.T) { Namespace: "namespace", OwnerReferences: ownerRefs(), Labels: map[string]string{ - clusterv1.ClusterNameLabel: clusterName, + clusterv1beta1.ClusterNameLabel: clusterName, }, Annotations: map[string]string{ asoannotations.ReconcilePolicy: string(asoannotations.ReconcilePolicySkip), @@ -508,7 +508,7 @@ func TestCreateOrUpdateResource(t *testing.T) { Namespace: "namespace", OwnerReferences: ownerRefs(), Labels: map[string]string{ - clusterv1.ClusterNameLabel: clusterName, + clusterv1beta1.ClusterNameLabel: clusterName, }, Annotations: map[string]string{ asoannotations.ReconcilePolicy: string(asoannotations.ReconcilePolicySkip), @@ -565,8 +565,8 @@ func TestCreateOrUpdateResource(t *testing.T) { Name: "name", Namespace: "namespace", Labels: map[string]string{ - clusterv1.ClusterNameLabel: clusterName, - infrav1.OwnedByClusterLabelKey: 
clusterName, //nolint:staticcheck // Referencing this deprecated value is required for backwards compatibility. + clusterv1beta1.ClusterNameLabel: clusterName, + infrav1.OwnedByClusterLabelKey: clusterName, //nolint:staticcheck // Referencing this deprecated value is required for backwards compatibility. }, Annotations: map[string]string{ asoannotations.ReconcilePolicy: string(asoannotations.ReconcilePolicySkip), @@ -621,7 +621,7 @@ func TestCreateOrUpdateResource(t *testing.T) { Namespace: "namespace", OwnerReferences: ownerRefs(), Labels: map[string]string{ - clusterv1.ClusterNameLabel: clusterName, + clusterv1beta1.ClusterNameLabel: clusterName, }, }, Status: asoresourcesv1.ResourceGroup_STATUS{ @@ -664,7 +664,7 @@ func TestCreateOrUpdateResource(t *testing.T) { Name: "name", Namespace: "namespace", Labels: map[string]string{ - clusterv1.ClusterNameLabel: clusterName, + clusterv1beta1.ClusterNameLabel: clusterName, }, }, Status: asoresourcesv1.ResourceGroup_STATUS{ @@ -711,7 +711,7 @@ func TestCreateOrUpdateResource(t *testing.T) { Namespace: "namespace", OwnerReferences: ownerRefs(), Labels: map[string]string{ - clusterv1.ClusterNameLabel: clusterName, + clusterv1beta1.ClusterNameLabel: clusterName, }, Annotations: map[string]string{ asoannotations.ReconcilePolicy: string(asoannotations.ReconcilePolicyManage), @@ -770,7 +770,7 @@ func TestCreateOrUpdateResource(t *testing.T) { Namespace: "namespace", OwnerReferences: ownerRefs(), Labels: map[string]string{ - clusterv1.ClusterNameLabel: clusterName, + clusterv1beta1.ClusterNameLabel: clusterName, }, }, Status: asoresourcesv1.ResourceGroup_STATUS{ @@ -828,7 +828,7 @@ func TestCreateOrUpdateResource(t *testing.T) { Namespace: "namespace", OwnerReferences: ownerRefs(), Labels: map[string]string{ - clusterv1.ClusterNameLabel: clusterName, + clusterv1beta1.ClusterNameLabel: clusterName, }, Annotations: map[string]string{ asoannotations.ReconcilePolicy: string(asoannotations.ReconcilePolicyManage), @@ -887,7 +887,7 @@ func TestCreateOrUpdateResource(t *testing.T) { Namespace: "namespace", OwnerReferences: ownerRefs(), Labels: map[string]string{ - clusterv1.ClusterNameLabel: clusterName, + clusterv1beta1.ClusterNameLabel: clusterName, }, Annotations: map[string]string{ asoannotations.ReconcilePolicy: string(asoannotations.ReconcilePolicyManage), @@ -938,7 +938,7 @@ func TestCreateOrUpdateResource(t *testing.T) { Namespace: "namespace", OwnerReferences: ownerRefs(), Labels: map[string]string{ - clusterv1.ClusterNameLabel: clusterName, + clusterv1beta1.ClusterNameLabel: clusterName, }, Annotations: map[string]string{ prePauseReconcilePolicyAnnotation: string(asoannotations.ReconcilePolicyManage), @@ -1060,7 +1060,7 @@ func TestCreateOrUpdateResource(t *testing.T) { Namespace: "namespace", OwnerReferences: ownerRefs(), Labels: map[string]string{ - clusterv1.ClusterNameLabel: clusterName, + clusterv1beta1.ClusterNameLabel: clusterName, }, Annotations: map[string]string{ asoannotations.ReconcilePolicy: string(asoannotations.ReconcilePolicyManage), diff --git a/azure/services/aso/mock_aso/aso_mock.go b/azure/services/aso/mock_aso/aso_mock.go index 07423b710a0..557bd32ddb5 100644 --- a/azure/services/aso/mock_aso/aso_mock.go +++ b/azure/services/aso/mock_aso/aso_mock.go @@ -34,7 +34,7 @@ import ( gomock "go.uber.org/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" - v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1" + v1beta10 "sigs.k8s.io/cluster-api/api/core/v1beta1" client 
"sigs.k8s.io/controller-runtime/pkg/client" ) diff --git a/azure/services/aso/service.go b/azure/services/aso/service.go index bb8ebe09cb7..0624b755e2e 100644 --- a/azure/services/aso/service.go +++ b/azure/services/aso/service.go @@ -21,7 +21,7 @@ import ( "github.com/Azure/azure-service-operator/v2/pkg/genruntime" "github.com/pkg/errors" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -39,7 +39,7 @@ type Service[T genruntime.MetaObject, S Scope] struct { // may be skipped for a service by leaving this field nil. ListFunc func(ctx context.Context, client client.Client, opts ...client.ListOption) (resources []T, err error) - ConditionType clusterv1.ConditionType + ConditionType clusterv1beta1.ConditionType PostCreateOrUpdateResourceHook func(ctx context.Context, scope S, result T, err error) error PostReconcileHook func(ctx context.Context, scope S, err error) error PostDeleteHook func(ctx context.Context, scope S, err error) error diff --git a/azure/services/aso/service_test.go b/azure/services/aso/service_test.go index d9af64540a0..b50c65ed8df 100644 --- a/azure/services/aso/service_test.go +++ b/azure/services/aso/service_test.go @@ -26,7 +26,7 @@ import ( "go.uber.org/mock/gomock" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -40,7 +40,7 @@ import ( const ( serviceName = "test" - conditionType = clusterv1.ConditionType("Test") + conditionType = clusterv1beta1.ConditionType("Test") ) func TestServiceReconcile(t *testing.T) { diff --git a/azure/services/async/mock_async/async_mock.go b/azure/services/async/mock_async/async_mock.go index 76ea8e9d171..cbccc374eff 100644 --- a/azure/services/async/mock_async/async_mock.go +++ b/azure/services/async/mock_async/async_mock.go @@ -35,7 +35,7 @@ import ( gomock "go.uber.org/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" - v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1" + v1beta10 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // MockFutureScope is a mock of FutureScope interface. diff --git a/azure/services/availabilitysets/mock_availabilitysets/availabilitysets_mock.go b/azure/services/availabilitysets/mock_availabilitysets/availabilitysets_mock.go index 056913891e6..1f6343206af 100644 --- a/azure/services/availabilitysets/mock_availabilitysets/availabilitysets_mock.go +++ b/azure/services/availabilitysets/mock_availabilitysets/availabilitysets_mock.go @@ -33,7 +33,7 @@ import ( gomock "go.uber.org/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" - v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1" + v1beta10 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // MockAvailabilitySetScope is a mock of AvailabilitySetScope interface. 
diff --git a/azure/services/disks/mock_disks/disks_mock.go b/azure/services/disks/mock_disks/disks_mock.go index 9947156ccd1..5b2c79177cb 100644 --- a/azure/services/disks/mock_disks/disks_mock.go +++ b/azure/services/disks/mock_disks/disks_mock.go @@ -33,7 +33,7 @@ import ( gomock "go.uber.org/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" - v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1" + v1beta10 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // MockDiskScope is a mock of DiskScope interface. diff --git a/azure/services/groups/mock_groups/groups_mock.go b/azure/services/groups/mock_groups/groups_mock.go index 34152ae97ec..81bd7f3d6b4 100644 --- a/azure/services/groups/mock_groups/groups_mock.go +++ b/azure/services/groups/mock_groups/groups_mock.go @@ -33,7 +33,7 @@ import ( gomock "go.uber.org/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" - v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1" + v1beta10 "sigs.k8s.io/cluster-api/api/core/v1beta1" client "sigs.k8s.io/controller-runtime/pkg/client" ) diff --git a/azure/services/inboundnatrules/mock_inboundnatrules/inboundnatrules_mock.go b/azure/services/inboundnatrules/mock_inboundnatrules/inboundnatrules_mock.go index 4b9fe77ac4a..c4680028eb3 100644 --- a/azure/services/inboundnatrules/mock_inboundnatrules/inboundnatrules_mock.go +++ b/azure/services/inboundnatrules/mock_inboundnatrules/inboundnatrules_mock.go @@ -33,7 +33,7 @@ import ( gomock "go.uber.org/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" - v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1" + v1beta10 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // MockInboundNatScope is a mock of InboundNatScope interface. 
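Likewise, DeleteMachineAnnotation moves with the core package but keeps its value, so the rolling-update ordering in machinepool_deployment_strategy.go above changes only in its alias. A reduced sketch of that ordering trick, using a hypothetical stand-in struct instead of infrav1exp.AzureMachinePoolMachine (assumption: only the annotations map matters for the ordering):

```go
package main

import (
	"fmt"
	"sort"

	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
)

// machine is a stand-in for infrav1exp.AzureMachinePoolMachine.
type machine struct {
	Name        string
	Annotations map[string]string
}

// orderByDeleteMachineAnnotation mirrors the strategy helper: machines carrying
// the delete annotation move to the front, and sort.SliceStable preserves the
// relative order of the rest, so any pre-existing delete priority is kept.
func orderByDeleteMachineAnnotation(machines []machine) []machine {
	sort.SliceStable(machines, func(i, _ int) bool {
		_, iHasAnnotation := machines[i].Annotations[clusterv1beta1.DeleteMachineAnnotation]
		return iHasAnnotation
	})
	return machines
}

func main() {
	ms := []machine{
		{Name: "a"},
		{Name: "b", Annotations: map[string]string{clusterv1beta1.DeleteMachineAnnotation: "true"}},
		{Name: "c"},
	}
	for _, m := range orderByDeleteMachineAnnotation(ms) {
		fmt.Print(m.Name, " ") // b a c
	}
}
```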
diff --git a/azure/services/loadbalancers/mock_loadbalancers/loadbalancers_mock.go b/azure/services/loadbalancers/mock_loadbalancers/loadbalancers_mock.go index 30af94dbabe..47118c0afd1 100644 --- a/azure/services/loadbalancers/mock_loadbalancers/loadbalancers_mock.go +++ b/azure/services/loadbalancers/mock_loadbalancers/loadbalancers_mock.go @@ -34,7 +34,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" - v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1" + v1beta10 "sigs.k8s.io/cluster-api/api/core/v1beta1" client "sigs.k8s.io/controller-runtime/pkg/client" ) diff --git a/azure/services/managedclusters/managedclusters.go b/azure/services/managedclusters/managedclusters.go index 8eac771e0a4..3b50cff0cf6 100644 --- a/azure/services/managedclusters/managedclusters.go +++ b/azure/services/managedclusters/managedclusters.go @@ -27,7 +27,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/clientcmd" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/secret" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/conversion" @@ -54,7 +54,7 @@ type ManagedClusterScope interface { aso.Scope azure.Authorizer ManagedClusterSpec() azure.ASOResourceSpecGetter[genruntime.MetaObject] - SetControlPlaneEndpoint(clusterv1.APIEndpoint) + SetControlPlaneEndpoint(clusterv1beta1.APIEndpoint) MakeEmptyKubeConfigSecret() corev1.Secret GetAdminKubeconfigData() []byte SetAdminKubeconfigData([]byte) @@ -92,14 +92,14 @@ func postCreateOrUpdateResourceHook(ctx context.Context, scope ManagedClusterSco } // Update control plane endpoint. 
- endpoint := clusterv1.APIEndpoint{ + endpoint := clusterv1beta1.APIEndpoint{ Host: ptr.Deref(managedCluster.Status.Fqdn, ""), Port: 443, } if managedCluster.Status.ApiServerAccessProfile != nil && ptr.Deref(managedCluster.Status.ApiServerAccessProfile.EnablePrivateCluster, false) && !ptr.Deref(managedCluster.Status.ApiServerAccessProfile.EnablePrivateClusterPublicFQDN, false) { - endpoint = clusterv1.APIEndpoint{ + endpoint = clusterv1beta1.APIEndpoint{ Host: ptr.Deref(managedCluster.Status.PrivateFQDN, ""), Port: 443, } diff --git a/azure/services/managedclusters/managedclusters_test.go b/azure/services/managedclusters/managedclusters_test.go index db73de3f418..02417242886 100644 --- a/azure/services/managedclusters/managedclusters_test.go +++ b/azure/services/managedclusters/managedclusters_test.go @@ -27,7 +27,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/secret" fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -114,7 +114,7 @@ func TestPostCreateOrUpdateResourceHook(t *testing.T) { Build() scope.EXPECT().GetClient().Return(kclient).AnyTimes() - scope.EXPECT().SetControlPlaneEndpoint(clusterv1.APIEndpoint{ + scope.EXPECT().SetControlPlaneEndpoint(clusterv1beta1.APIEndpoint{ Host: "private fqdn", Port: 443, }) @@ -170,7 +170,7 @@ func setupMockScope(t *testing.T) *mock_managedclusters.MockManagedClusterScope Build() scope.EXPECT().GetClient().Return(kclient).AnyTimes() - scope.EXPECT().SetControlPlaneEndpoint(clusterv1.APIEndpoint{ + scope.EXPECT().SetControlPlaneEndpoint(clusterv1beta1.APIEndpoint{ Host: "fdqn", Port: 443, }) diff --git a/azure/services/managedclusters/mock_managedclusters/managedclusters_mock.go b/azure/services/managedclusters/mock_managedclusters/managedclusters_mock.go index f95e84bca4d..23954960758 100644 --- a/azure/services/managedclusters/mock_managedclusters/managedclusters_mock.go +++ b/azure/services/managedclusters/mock_managedclusters/managedclusters_mock.go @@ -36,7 +36,7 @@ import ( v1 "k8s.io/api/core/v1" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" - v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1" + v1beta10 "sigs.k8s.io/cluster-api/api/core/v1beta1" client "sigs.k8s.io/controller-runtime/pkg/client" ) diff --git a/azure/services/natgateways/mock_natgateways/natgateways_mock.go b/azure/services/natgateways/mock_natgateways/natgateways_mock.go index 1ba5f8d3eee..f8db8e538ae 100644 --- a/azure/services/natgateways/mock_natgateways/natgateways_mock.go +++ b/azure/services/natgateways/mock_natgateways/natgateways_mock.go @@ -33,7 +33,7 @@ import ( gomock "go.uber.org/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" - v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1" + v1beta10 "sigs.k8s.io/cluster-api/api/core/v1beta1" client "sigs.k8s.io/controller-runtime/pkg/client" ) diff --git a/azure/services/networkinterfaces/mock_networkinterfaces/networkinterfaces_mock.go b/azure/services/networkinterfaces/mock_networkinterfaces/networkinterfaces_mock.go index 903d0a42ba1..b205814e11d 100644 --- a/azure/services/networkinterfaces/mock_networkinterfaces/networkinterfaces_mock.go +++ b/azure/services/networkinterfaces/mock_networkinterfaces/networkinterfaces_mock.go @@ -33,7 +33,7 @@ import ( gomock 
"go.uber.org/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" - v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1" + v1beta10 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // MockNICScope is a mock of NICScope interface. diff --git a/azure/services/privatedns/mock_privatedns/privatedns_mock.go b/azure/services/privatedns/mock_privatedns/privatedns_mock.go index 914756fe512..a5ab0651faa 100644 --- a/azure/services/privatedns/mock_privatedns/privatedns_mock.go +++ b/azure/services/privatedns/mock_privatedns/privatedns_mock.go @@ -33,7 +33,7 @@ import ( gomock "go.uber.org/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" - v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1" + v1beta10 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // MockScope is a mock of Scope interface. diff --git a/azure/services/privateendpoints/mock_privateendpoints/privateendpoints_mock.go b/azure/services/privateendpoints/mock_privateendpoints/privateendpoints_mock.go index 801e5d3b283..c7480eaf245 100644 --- a/azure/services/privateendpoints/mock_privateendpoints/privateendpoints_mock.go +++ b/azure/services/privateendpoints/mock_privateendpoints/privateendpoints_mock.go @@ -33,7 +33,7 @@ import ( gomock "go.uber.org/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" - v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1" + v1beta10 "sigs.k8s.io/cluster-api/api/core/v1beta1" client "sigs.k8s.io/controller-runtime/pkg/client" ) diff --git a/azure/services/publicips/mock_publicips/publicips_mock.go b/azure/services/publicips/mock_publicips/publicips_mock.go index b22069da914..c9c33b5b2fc 100644 --- a/azure/services/publicips/mock_publicips/publicips_mock.go +++ b/azure/services/publicips/mock_publicips/publicips_mock.go @@ -33,7 +33,7 @@ import ( gomock "go.uber.org/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" - v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1" + v1beta10 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // MockPublicIPScope is a mock of PublicIPScope interface. 
diff --git a/azure/services/publicips/publicips_test.go b/azure/services/publicips/publicips_test.go index a86bf9082aa..1451bcea8ca 100644 --- a/azure/services/publicips/publicips_test.go +++ b/azure/services/publicips/publicips_test.go @@ -28,7 +28,7 @@ import ( "go.uber.org/mock/gomock" "k8s.io/client-go/kubernetes/scheme" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -39,7 +39,7 @@ import ( ) func init() { - _ = clusterv1.AddToScheme(scheme.Scheme) + _ = clusterv1beta1.AddToScheme(scheme.Scheme) } var ( diff --git a/azure/services/resourcehealth/mock_resourcehealth/resourcehealth_mock.go b/azure/services/resourcehealth/mock_resourcehealth/resourcehealth_mock.go index 72210747533..2192eb2d069 100644 --- a/azure/services/resourcehealth/mock_resourcehealth/resourcehealth_mock.go +++ b/azure/services/resourcehealth/mock_resourcehealth/resourcehealth_mock.go @@ -30,8 +30,8 @@ import ( azcore "github.com/Azure/azure-sdk-for-go/sdk/azcore" gomock "go.uber.org/mock/gomock" - v1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" - conditions "sigs.k8s.io/cluster-api/util/conditions" + v1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) // MockResourceHealthScope is a mock of ResourceHealthScope interface. diff --git a/azure/services/resourcehealth/resourcehealth.go b/azure/services/resourcehealth/resourcehealth.go index df4171b0f6b..aaa94b64da9 100644 --- a/azure/services/resourcehealth/resourcehealth.go +++ b/azure/services/resourcehealth/resourcehealth.go @@ -21,8 +21,8 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -37,14 +37,14 @@ const serviceName = "resourcehealth" type ResourceHealthScope interface { azure.Authorizer AvailabilityStatusResourceURI() string - AvailabilityStatusResource() conditions.Setter + AvailabilityStatusResource() v1beta1conditions.Setter } // AvailabilityStatusFilterer transforms the condition derived from the // availability status to allow the condition to be overridden in specific // circumstances. type AvailabilityStatusFilterer interface { - AvailabilityStatusFilter(cond *clusterv1.Condition) *clusterv1.Condition + AvailabilityStatusFilter(cond *clusterv1beta1.Condition) *clusterv1beta1.Condition } // Service provides operations on Azure resources. 
@@ -76,7 +76,7 @@ func (s *Service) Reconcile(ctx context.Context) error { defer done() if !feature.Gates.Enabled(feature.AKSResourceHealth) { - conditions.Delete(s.Scope.AvailabilityStatusResource(), infrav1.AzureResourceAvailableCondition) + v1beta1conditions.Delete(s.Scope.AvailabilityStatusResource(), infrav1.AzureResourceAvailableCondition) return nil } @@ -92,7 +92,7 @@ func (s *Service) Reconcile(ctx context.Context) error { cond = filterer.AvailabilityStatusFilter(cond) } - conditions.Set(s.Scope.AvailabilityStatusResource(), cond) + v1beta1conditions.Set(s.Scope.AvailabilityStatusResource(), cond) if cond.Status == corev1.ConditionFalse { return errors.Errorf("resource is not available: %s", cond.Message) diff --git a/azure/services/resourcehealth/resourcehealth_test.go b/azure/services/resourcehealth/resourcehealth_test.go index 197c6def5c9..4f4f4b21ea9 100644 --- a/azure/services/resourcehealth/resourcehealth_test.go +++ b/azure/services/resourcehealth/resourcehealth_test.go @@ -25,7 +25,7 @@ import ( "go.uber.org/mock/gomock" utilfeature "k8s.io/component-base/featuregate/testing" "k8s.io/utils/ptr" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure/services/resourcehealth/mock_resourcehealth" @@ -89,7 +89,7 @@ func TestReconcileResourceHealth(t *testing.T) { }, }, nil) // ignore the above status - f.AvailabilityStatusFilter(gomock.Any()).Return(conditions.TrueCondition(infrav1.AzureResourceAvailableCondition)) + f.AvailabilityStatusFilter(gomock.Any()).Return(v1beta1conditions.TrueCondition(infrav1.AzureResourceAvailableCondition)) }, expectedError: "", }, diff --git a/azure/services/roleassignments/mock_roleassignments/roleassignments_mock.go b/azure/services/roleassignments/mock_roleassignments/roleassignments_mock.go index 83f389253f2..eebadf56abc 100644 --- a/azure/services/roleassignments/mock_roleassignments/roleassignments_mock.go +++ b/azure/services/roleassignments/mock_roleassignments/roleassignments_mock.go @@ -33,7 +33,7 @@ import ( gomock "go.uber.org/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" - v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1" + v1beta10 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // MockRoleAssignmentScope is a mock of RoleAssignmentScope interface. diff --git a/azure/services/routetables/mock_routetables/routetables_mock.go b/azure/services/routetables/mock_routetables/routetables_mock.go index 38893554290..0023fb49e5e 100644 --- a/azure/services/routetables/mock_routetables/routetables_mock.go +++ b/azure/services/routetables/mock_routetables/routetables_mock.go @@ -33,7 +33,7 @@ import ( gomock "go.uber.org/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" - v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1" + v1beta10 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // MockRouteTableScope is a mock of RouteTableScope interface. 
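The resourcehealth hunks above show the second migration in this PR: the condition helpers now live under util/deprecated/v1beta1/conditions, with unchanged signatures, making it explicit that v1beta1-style conditions are kept only for compatibility. A minimal sketch of the relocated helpers against any Setter (the condition type value and function name are illustrative):

package example

import (
	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
)

// markAvailable sets a True condition via the deprecated package; Setter,
// TrueCondition, and Set behave exactly as in the old util/conditions.
func markAvailable(obj v1beta1conditions.Setter, t clusterv1beta1.ConditionType) {
	v1beta1conditions.Set(obj, v1beta1conditions.TrueCondition(t))
}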
diff --git a/azure/services/scalesets/mock_scalesets/scalesets_mock.go b/azure/services/scalesets/mock_scalesets/scalesets_mock.go index cb5611a4d83..d0f3ba18485 100644 --- a/azure/services/scalesets/mock_scalesets/scalesets_mock.go +++ b/azure/services/scalesets/mock_scalesets/scalesets_mock.go @@ -34,7 +34,7 @@ import ( gomock "go.uber.org/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" - v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1" + v1beta10 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // MockScaleSetScope is a mock of ScaleSetScope interface. diff --git a/azure/services/scalesets/scalesets_test.go b/azure/services/scalesets/scalesets_test.go index 16b625606fc..fc2253ba8df 100644 --- a/azure/services/scalesets/scalesets_test.go +++ b/azure/services/scalesets/scalesets_test.go @@ -28,7 +28,7 @@ import ( "go.uber.org/mock/gomock" "k8s.io/client-go/kubernetes/scheme" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -76,7 +76,7 @@ func internalError() *azcore.ResponseError { } func init() { - _ = clusterv1.AddToScheme(scheme.Scheme) + _ = clusterv1beta1.AddToScheme(scheme.Scheme) } func getDefaultVMSSSpec() azure.ResourceSpecGetter { diff --git a/azure/services/scalesetvms/mock_scalesetvms/scalesetvms_mock.go b/azure/services/scalesetvms/mock_scalesetvms/scalesetvms_mock.go index ad1074f2076..4d58f7867fb 100644 --- a/azure/services/scalesetvms/mock_scalesetvms/scalesetvms_mock.go +++ b/azure/services/scalesetvms/mock_scalesetvms/scalesetvms_mock.go @@ -33,7 +33,7 @@ import ( gomock "go.uber.org/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" - v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1" + v1beta10 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // MockScaleSetVMScope is a mock of ScaleSetVMScope interface. diff --git a/azure/services/securitygroups/mock_securitygroups/securitygroups_mock.go b/azure/services/securitygroups/mock_securitygroups/securitygroups_mock.go index 14f334d31cd..56c49277fcb 100644 --- a/azure/services/securitygroups/mock_securitygroups/securitygroups_mock.go +++ b/azure/services/securitygroups/mock_securitygroups/securitygroups_mock.go @@ -33,7 +33,7 @@ import ( gomock "go.uber.org/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" - v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1" + v1beta10 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // MockNSGScope is a mock of NSGScope interface. 
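As in publicips_test.go and scalesets_test.go above, test schemes register the relocated package through the same AddToScheme entry point. A self-contained sketch, assuming only apimachinery and the cluster-api module (the constructor name is illustrative):

package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
)

// newScheme registers the CAPI core v1beta1 types so fake clients and other
// test fixtures can round-trip Cluster objects; AddToScheme kept its
// signature across the package move.
func newScheme() (*runtime.Scheme, error) {
	s := runtime.NewScheme()
	if err := clusterv1beta1.AddToScheme(s); err != nil {
		return nil, err
	}
	return s, nil
}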
diff --git a/azure/services/subnets/mock_subnets/subnets_mock.go b/azure/services/subnets/mock_subnets/subnets_mock.go index 250a851cd3e..f75f8c7cb02 100644 --- a/azure/services/subnets/mock_subnets/subnets_mock.go +++ b/azure/services/subnets/mock_subnets/subnets_mock.go @@ -33,7 +33,7 @@ import ( gomock "go.uber.org/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" - v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1" + v1beta10 "sigs.k8s.io/cluster-api/api/core/v1beta1" client "sigs.k8s.io/controller-runtime/pkg/client" ) diff --git a/azure/services/virtualmachines/mock_virtualmachines/virtualmachines_mock.go b/azure/services/virtualmachines/mock_virtualmachines/virtualmachines_mock.go index 5434c151af0..aa3d12c58f1 100644 --- a/azure/services/virtualmachines/mock_virtualmachines/virtualmachines_mock.go +++ b/azure/services/virtualmachines/mock_virtualmachines/virtualmachines_mock.go @@ -34,7 +34,7 @@ import ( v1 "k8s.io/api/core/v1" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" - v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1" + v1beta10 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // MockVMScope is a mock of VMScope interface. diff --git a/azure/services/virtualmachines/virtualmachines.go b/azure/services/virtualmachines/virtualmachines.go index fea3ae256d5..55792d983ab 100644 --- a/azure/services/virtualmachines/virtualmachines.go +++ b/azure/services/virtualmachines/virtualmachines.go @@ -26,7 +26,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/utils/ptr" azprovider "sigs.k8s.io/cloud-provider-azure/pkg/provider" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -50,7 +50,7 @@ type VMScope interface { SetProviderID(string) SetAddresses([]corev1.NodeAddress) SetVMState(infrav1.ProvisioningState) - SetConditionFalse(clusterv1.ConditionType, string, clusterv1.ConditionSeverity, string) + SetConditionFalse(clusterv1beta1.ConditionType, string, clusterv1beta1.ConditionSeverity, string) } // Service provides operations on Azure resources. 
@@ -173,7 +173,7 @@ func (s *Service) checkUserAssignedIdentities(specIdentities []infrav1.UserAssig for _, expectedIdentity := range specIdentities { _, exists := actualMap[strings.TrimPrefix(expectedIdentity.ProviderID, azureutil.ProviderIDPrefix)] if !exists { - s.Scope.SetConditionFalse(infrav1.VMIdentitiesReadyCondition, infrav1.UserAssignedIdentityMissingReason, clusterv1.ConditionSeverityWarning, vmMissingUAI+expectedIdentity.ProviderID) + s.Scope.SetConditionFalse(infrav1.VMIdentitiesReadyCondition, infrav1.UserAssignedIdentityMissingReason, clusterv1beta1.ConditionSeverityWarning, vmMissingUAI+expectedIdentity.ProviderID) return } } diff --git a/azure/services/virtualmachines/virtualmachines_test.go b/azure/services/virtualmachines/virtualmachines_test.go index 7b1f6b85e08..51f38b1c023 100644 --- a/azure/services/virtualmachines/virtualmachines_test.go +++ b/azure/services/virtualmachines/virtualmachines_test.go @@ -29,7 +29,7 @@ import ( "go.uber.org/mock/gomock" corev1 "k8s.io/api/core/v1" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure/services/async/mock_async" @@ -364,7 +364,7 @@ func TestCheckUserAssignedIdentities(t *testing.T) { scopeMock := mock_virtualmachines.NewMockVMScope(mockCtrl) if tc.expectedKey != "" { - scopeMock.EXPECT().SetConditionFalse(infrav1.VMIdentitiesReadyCondition, infrav1.UserAssignedIdentityMissingReason, clusterv1.ConditionSeverityWarning, vmMissingUAI+tc.expectedKey).Times(1) + scopeMock.EXPECT().SetConditionFalse(infrav1.VMIdentitiesReadyCondition, infrav1.UserAssignedIdentityMissingReason, clusterv1beta1.ConditionSeverityWarning, vmMissingUAI+tc.expectedKey).Times(1) } s := &Service{ Scope: scopeMock, diff --git a/azure/services/virtualnetworks/mock_virtualnetworks/virtualnetworks_mock.go b/azure/services/virtualnetworks/mock_virtualnetworks/virtualnetworks_mock.go index 3105239e8c1..90e75df8914 100644 --- a/azure/services/virtualnetworks/mock_virtualnetworks/virtualnetworks_mock.go +++ b/azure/services/virtualnetworks/mock_virtualnetworks/virtualnetworks_mock.go @@ -33,7 +33,7 @@ import ( gomock "go.uber.org/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" - v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1" + v1beta10 "sigs.k8s.io/cluster-api/api/core/v1beta1" client "sigs.k8s.io/controller-runtime/pkg/client" ) diff --git a/azure/services/vmextensions/mock_vmextensions/vmextensions_mock.go b/azure/services/vmextensions/mock_vmextensions/vmextensions_mock.go index bc2c351843e..1de99211e9c 100644 --- a/azure/services/vmextensions/mock_vmextensions/vmextensions_mock.go +++ b/azure/services/vmextensions/mock_vmextensions/vmextensions_mock.go @@ -33,7 +33,7 @@ import ( gomock "go.uber.org/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" - v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1" + v1beta10 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // MockVMExtensionScope is a mock of VMExtensionScope interface. 
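The virtualmachines hunks above carry the rename into condition severities: ConditionSeverityWarning now also comes from api/core/v1beta1. A minimal sketch of the call shape used by checkUserAssignedIdentities (the interface, reason, and message strings below are illustrative stand-ins for the CAPZ scope and infrav1 constants):

package example

import (
	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
)

// conditionSetter captures the fragment of VMScope exercised above.
type conditionSetter interface {
	SetConditionFalse(clusterv1beta1.ConditionType, string, clusterv1beta1.ConditionSeverity, string)
}

// flagMissingIdentity mirrors the checkUserAssignedIdentities call path: a
// missing user-assigned identity surfaces as a Warning-severity condition.
func flagMissingIdentity(s conditionSetter, providerID string) {
	s.SetConditionFalse(
		clusterv1beta1.ConditionType("VMIdentitiesReady"),
		"UserAssignedIdentityMissing",
		clusterv1beta1.ConditionSeverityWarning,
		"missing user-assigned identity: "+providerID,
	)
}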
diff --git a/azure/services/vnetpeerings/mock_vnetpeerings/vnetpeerings_mock.go b/azure/services/vnetpeerings/mock_vnetpeerings/vnetpeerings_mock.go index 5de5027bc75..fdeed68d05a 100644 --- a/azure/services/vnetpeerings/mock_vnetpeerings/vnetpeerings_mock.go +++ b/azure/services/vnetpeerings/mock_vnetpeerings/vnetpeerings_mock.go @@ -33,7 +33,7 @@ import ( gomock "go.uber.org/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" - v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1" + v1beta10 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // MockVnetPeeringScope is a mock of VnetPeeringScope interface. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azureasomanagedclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azureasomanagedclusters.yaml index 57450a38cdd..cd441ebf18d 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azureasomanagedclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azureasomanagedclusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.18.0 name: azureasomanagedclusters.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azureasomanagedclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azureasomanagedclustertemplates.yaml index 3094ba8187d..9cff187e6cd 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azureasomanagedclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azureasomanagedclustertemplates.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.18.0 name: azureasomanagedclustertemplates.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azureasomanagedcontrolplanes.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azureasomanagedcontrolplanes.yaml index 9d8b34375f1..2172a6c611b 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azureasomanagedcontrolplanes.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azureasomanagedcontrolplanes.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.18.0 name: azureasomanagedcontrolplanes.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azureasomanagedcontrolplanetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azureasomanagedcontrolplanetemplates.yaml index 7064b003903..52b9139f313 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azureasomanagedcontrolplanetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azureasomanagedcontrolplanetemplates.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.18.0 name: azureasomanagedcontrolplanetemplates.infrastructure.cluster.x-k8s.io spec: group: 
infrastructure.cluster.x-k8s.io diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azureasomanagedmachinepools.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azureasomanagedmachinepools.yaml index 9f6b0f6558c..75947d139ac 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azureasomanagedmachinepools.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azureasomanagedmachinepools.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.18.0 name: azureasomanagedmachinepools.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azureasomanagedmachinepooltemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azureasomanagedmachinepooltemplates.yaml index 18875382a0c..84116792ce8 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azureasomanagedmachinepooltemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azureasomanagedmachinepooltemplates.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.18.0 name: azureasomanagedmachinepooltemplates.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azureclusteridentities.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azureclusteridentities.yaml index ec74f70f622..9aed585f981 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azureclusteridentities.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azureclusteridentities.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.18.0 name: azureclusteridentities.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azureclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azureclusters.yaml index 1edca6b7df9..f384d092652 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azureclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azureclusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.18.0 name: azureclusters.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azureclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azureclustertemplates.yaml index 2b5acaaec48..d049164ff90 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azureclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azureclustertemplates.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.18.0 name: azureclustertemplates.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git 
a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachinepoolmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachinepoolmachines.yaml index 5f9996e2bd1..d51083f3189 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachinepoolmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachinepoolmachines.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.18.0 name: azuremachinepoolmachines.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachinepools.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachinepools.yaml index 297318d1463..6d70d34c9cb 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachinepools.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachinepools.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.18.0 name: azuremachinepools.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachines.yaml index d3c72222d1b..efcdb4783c2 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachines.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.18.0 name: azuremachines.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachinetemplates.yaml index 47d0b1bdeeb..acd990b52fc 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachinetemplates.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.18.0 name: azuremachinetemplates.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedclusters.yaml index 2eb92caf68a..6787a70bcb9 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedclusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.18.0 name: azuremanagedclusters.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedclustertemplates.yaml index 9833b373530..3049fe5ce27 100644 --- 
a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedclustertemplates.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.18.0 name: azuremanagedclustertemplates.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml index b6c7fee62ab..c6af308b7df 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.18.0 name: azuremanagedcontrolplanes.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanetemplates.yaml index 968f64e8dfd..f84e974111f 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanetemplates.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.18.0 name: azuremanagedcontrolplanetemplates.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedmachinepools.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedmachinepools.yaml index 05c3b2662c4..b4df6b7172a 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedmachinepools.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedmachinepools.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.18.0 name: azuremanagedmachinepools.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedmachinepooltemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedmachinepooltemplates.yaml index 71854d2bd7e..0576e5fb414 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedmachinepooltemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedmachinepooltemplates.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.18.0 name: azuremanagedmachinepooltemplates.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 6706cf79b50..98fd935223a 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -21,6 +21,14 @@ rules: - namespaces verbs: - 
list +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch - apiGroups: - "" resources: @@ -127,14 +135,6 @@ rules: - get - list - watch -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - list - - watch - apiGroups: - infrastructure.cluster.x-k8s.io resources: diff --git a/controllers/agentpooladopt_controller.go b/controllers/agentpooladopt_controller.go index 7ec4a824ea0..edd4b112e7e 100644 --- a/controllers/agentpooladopt_controller.go +++ b/controllers/agentpooladopt_controller.go @@ -25,8 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -145,7 +144,7 @@ func (r *AgentPoolAdoptReconciler) Reconcile(ctx context.Context, req ctrl.Reque if err != nil { return ctrl.Result{}, fmt.Errorf("failed to get AzureASOManagedControlPlane %s: %w", managedControlPlaneKey, err) } - clusterName := asoManagedControlPlane.Labels[clusterv1.ClusterNameLabel] + clusterName := asoManagedControlPlane.Labels[clusterv1beta1.ClusterNameLabel] asoManagedMachinePool := &infrav1.AzureASOManagedMachinePool{ ObjectMeta: metav1.ObjectMeta{ @@ -161,17 +160,17 @@ func (r *AgentPoolAdoptReconciler) Reconcile(ctx context.Context, req ctrl.Reque }, } - machinePool := &expv1.MachinePool{ + machinePool := &clusterv1beta1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, Name: agentPool.Name, }, - Spec: expv1.MachinePoolSpec{ + Spec: clusterv1beta1.MachinePoolSpec{ ClusterName: clusterName, Replicas: replicas, - Template: clusterv1.MachineTemplateSpec{ - Spec: clusterv1.MachineSpec{ - Bootstrap: clusterv1.Bootstrap{ + Template: clusterv1beta1.MachineTemplateSpec{ + Spec: clusterv1beta1.MachineSpec{ + Bootstrap: clusterv1beta1.Bootstrap{ DataSecretName: ptr.To(""), }, ClusterName: clusterName, @@ -187,7 +186,7 @@ func (r *AgentPoolAdoptReconciler) Reconcile(ctx context.Context, req ctrl.Reque if ptr.Deref(agentPool.Spec.EnableAutoScaling, false) { machinePool.Annotations = map[string]string{ - clusterv1.ReplicasManagedByAnnotation: infrav1.ReplicasManagedByAKS, + clusterv1beta1.ReplicasManagedByAnnotation: infrav1.ReplicasManagedByAKS, } } diff --git a/controllers/agentpooladopt_controller_test.go b/controllers/agentpooladopt_controller_test.go index 6329226af3e..5ba2f706857 100644 --- a/controllers/agentpooladopt_controller_test.go +++ b/controllers/agentpooladopt_controller_test.go @@ -25,8 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -73,7 +72,7 @@ func TestAgentPoolAdoptController(t *testing.T) { Name: "fake-managed-cluster", Namespace: "fake-ns", Labels: map[string]string{ - clusterv1.ClusterNameLabel: "cluster-name", + clusterv1beta1.ClusterNameLabel: "cluster-name", }, }, } @@ -88,7 +87,7 @@ func TestAgentPoolAdoptController(t *testing.T) { } _, err = aprec.Reconcile(ctx, req) g.Expect(err).ToNot(HaveOccurred()) - mp := &expv1.MachinePool{} + mp := &clusterv1beta1.MachinePool{} err = aprec.Get(ctx, 
types.NamespacedName{Name: agentPool.Name, Namespace: "fake-ns"}, mp) g.Expect(err).ToNot(HaveOccurred()) asoMP := &infrav1.AzureASOManagedMachinePool{} diff --git a/controllers/asosecret_controller.go b/controllers/asosecret_controller.go index 79dfb328859..05f3442ad40 100644 --- a/controllers/asosecret_controller.go +++ b/controllers/asosecret_controller.go @@ -29,9 +29,8 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" @@ -46,6 +45,7 @@ import ( "sigs.k8s.io/cluster-api-provider-azure/util/aso" "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" "sigs.k8s.io/cluster-api-provider-azure/util/tele" + clusterv1beta1util "sigs.k8s.io/cluster-api-provider-azure/util/v1beta1" ) // ASOSecretReconciler reconciles ASO secrets associated with AzureCluster objects. @@ -85,9 +85,9 @@ func (asos *ASOSecretReconciler) SetupWithManager(ctx context.Context, mgr ctrl. predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), log, asos.WatchFilterValue), ), ). - // Add a watch on clusterv1.Cluster object for unpause notifications. + // Add a watch on clusterv1beta1.Cluster object for unpause notifications. Watches( - &clusterv1.Cluster{}, + &clusterv1beta1.Cluster{}, handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, infrav1.GroupVersion.WithKind(infrav1.AzureClusterKind), mgr.GetClient(), &infrav1.AzureCluster{})), builder.WithPredicates( predicates.ClusterUnpaused(mgr.GetScheme(), log), @@ -150,7 +150,7 @@ func (asos *ASOSecretReconciler) Reconcile(ctx context.Context, req ctrl.Request } var clusterIdentity *corev1.ObjectReference - var cluster *clusterv1.Cluster + var cluster *clusterv1beta1.Cluster var azureClient scope.AzureClients switch ownerType := asoSecretOwner.(type) { @@ -158,7 +158,7 @@ func (asos *ASOSecretReconciler) Reconcile(ctx context.Context, req ctrl.Request clusterIdentity = ownerType.Spec.IdentityRef // Fetch the Cluster. - cluster, err = util.GetOwnerCluster(ctx, asos.Client, ownerType.ObjectMeta) + cluster, err = clusterv1beta1util.GetOwnerCluster(ctx, asos.Client, ownerType.ObjectMeta) if err != nil { return reconcile.Result{}, err } @@ -185,7 +185,7 @@ func (asos *ASOSecretReconciler) Reconcile(ctx context.Context, req ctrl.Request clusterIdentity = ownerType.Spec.IdentityRef // Fetch the Cluster. - cluster, err = util.GetOwnerCluster(ctx, asos.Client, ownerType.ObjectMeta) + cluster, err = clusterv1beta1util.GetOwnerCluster(ctx, asos.Client, ownerType.ObjectMeta) if err != nil { return reconcile.Result{}, err } @@ -219,7 +219,7 @@ func (asos *ASOSecretReconciler) Reconcile(ctx context.Context, req ctrl.Request log = log.WithValues("cluster", cluster.Name) // Return early if the ASO Secret Owner(AzureCluster or AzureManagedControlPlane) or Cluster is paused. - if annotations.IsPaused(cluster, asoSecretOwner) { + if clusterv1beta1util.IsPaused(cluster, asoSecretOwner) { log.Info(fmt.Sprintf("%T or linked Cluster is marked as paused. Won't reconcile", asoSecretOwner)) asos.Recorder.Eventf(asoSecretOwner, corev1.EventTypeNormal, "ClusterPaused", fmt.Sprintf("%T or linked Cluster is marked as paused. 
Won't reconcile", asoSecretOwner)) @@ -251,7 +251,7 @@ func (asos *ASOSecretReconciler) Reconcile(ctx context.Context, req ctrl.Request return ctrl.Result{}, nil } -func (asos *ASOSecretReconciler) createSecretFromClusterIdentity(ctx context.Context, clusterIdentity *corev1.ObjectReference, cluster *clusterv1.Cluster, azureClient scope.AzureClients) (*corev1.Secret, error) { +func (asos *ASOSecretReconciler) createSecretFromClusterIdentity(ctx context.Context, clusterIdentity *corev1.ObjectReference, cluster *clusterv1beta1.Cluster, azureClient scope.AzureClients) (*corev1.Secret, error) { newASOSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: aso.GetASOSecretName(cluster.GetName()), diff --git a/controllers/asosecret_controller_test.go b/controllers/asosecret_controller_test.go index f0a8ae7cf7b..aa4dd89dc8e 100644 --- a/controllers/asosecret_controller_test.go +++ b/controllers/asosecret_controller_test.go @@ -28,7 +28,7 @@ import ( clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -44,7 +44,7 @@ func TestASOSecretReconcile(t *testing.T) { os.Setenv("AZURE_SUBSCRIPTION_ID", "fooSubscription") //nolint:gosec,usetesting // we want to use os.Setenv here instead of t.Setenv scheme := runtime.NewScheme() - _ = clusterv1.AddToScheme(scheme) + _ = clusterv1beta1.AddToScheme(scheme) _ = infrav1.AddToScheme(scheme) _ = clientgoscheme.AddToScheme(scheme) @@ -64,7 +64,7 @@ func TestASOSecretReconcile(t *testing.T) { "should not fail if the azure cluster is not found": { clusterName: defaultAzureCluster.Name, objects: []runtime.Object{ - getASOCluster(func(c *clusterv1.Cluster) { + getASOCluster(func(c *clusterv1beta1.Cluster) { c.Spec.InfrastructureRef.Name = defaultAzureCluster.Name c.Spec.InfrastructureRef.Kind = defaultAzureCluster.Kind }), @@ -282,7 +282,7 @@ func TestASOSecretReconcile(t *testing.T) { "should return if cluster is paused": { clusterName: defaultAzureCluster.Name, objects: []runtime.Object{ - getASOCluster(func(c *clusterv1.Cluster) { + getASOCluster(func(c *clusterv1beta1.Cluster) { c.Spec.Paused = true }), getASOAzureCluster(func(c *infrav1.AzureCluster) { @@ -354,18 +354,18 @@ func TestASOSecretReconcile(t *testing.T) { } } -func getASOCluster(changes ...func(*clusterv1.Cluster)) *clusterv1.Cluster { - input := &clusterv1.Cluster{ +func getASOCluster(changes ...func(*clusterv1beta1.Cluster)) *clusterv1beta1.Cluster { + input := &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", Namespace: "default", }, - Spec: clusterv1.ClusterSpec{ + Spec: clusterv1beta1.ClusterSpec{ InfrastructureRef: &corev1.ObjectReference{ APIVersion: infrav1.GroupVersion.String(), }, }, - Status: clusterv1.ClusterStatus{ + Status: clusterv1beta1.ClusterStatus{ InfrastructureReady: true, }, } @@ -384,7 +384,7 @@ func getASOAzureCluster(changes ...func(*infrav1.AzureCluster)) *infrav1.AzureCl Namespace: "default", OwnerReferences: []metav1.OwnerReference{ { - APIVersion: clusterv1.GroupVersion.String(), + APIVersion: clusterv1beta1.GroupVersion.String(), Kind: "Cluster", Name: "my-cluster", }, @@ -412,7 +412,7 @@ func getASOAzureManagedControlPlane(changes ...func(*infrav1.AzureManagedControl { Name: "my-cluster", Kind: "Cluster", - APIVersion: 
clusterv1.GroupVersion.String(), + APIVersion: clusterv1beta1.GroupVersion.String(), }, }, }, diff --git a/controllers/azureasomanagedcluster_controller.go b/controllers/azureasomanagedcluster_controller.go index e7eb41f7807..0a05e839468 100644 --- a/controllers/azureasomanagedcluster_controller.go +++ b/controllers/azureasomanagedcluster_controller.go @@ -24,11 +24,11 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/controllers/external" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" - "sigs.k8s.io/cluster-api/util/patch" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" @@ -43,6 +43,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/pkg/mutators" "sigs.k8s.io/cluster-api-provider-azure/util/tele" + clusterv1beta1util "sigs.k8s.io/cluster-api-provider-azure/util/v1beta1" ) var errInvalidControlPlaneKind = errors.New("AzureASOManagedCluster cannot be used without AzureASOManagedControlPlane") @@ -83,7 +84,7 @@ func (r *AzureASOManagedClusterReconciler) SetupWithManager(ctx context.Context, WithEventFilter(predicates.ResourceIsNotExternallyManaged(mgr.GetScheme(), log)). // Watch clusters for pause/unpause notifications Watches( - &clusterv1.Cluster{}, + &clusterv1beta1.Cluster{}, handler.EnqueueRequestsFromMapFunc( util.ClusterToInfrastructureMapFunc(ctx, infrav1.GroupVersion.WithKind(infrav1.AzureASOManagedClusterKind), mgr.GetClient(), &infrav1.AzureASOManagedCluster{}), ), @@ -139,7 +140,7 @@ func asoManagedControlPlaneToManagedClusterMap(c client.Client) handler.MapFunc return func(ctx context.Context, o client.Object) []reconcile.Request { asoManagedControlPlane := o.(*infrav1.AzureASOManagedControlPlane) - cluster, err := util.GetOwnerCluster(ctx, c, asoManagedControlPlane.ObjectMeta) + cluster, err := clusterv1beta1util.GetOwnerCluster(ctx, c, asoManagedControlPlane.ObjectMeta) if err != nil { return nil } @@ -187,7 +188,7 @@ func (r *AzureASOManagedClusterReconciler) Reconcile(ctx context.Context, req ct return ctrl.Result{}, client.IgnoreNotFound(err) } - patchHelper, err := patch.NewHelper(asoManagedCluster, r.Client) + patchHelper, err := v1beta1patch.NewHelper(asoManagedCluster, r.Client) if err != nil { return ctrl.Result{}, fmt.Errorf("failed to create patch helper: %w", err) } @@ -201,7 +202,7 @@ func (r *AzureASOManagedClusterReconciler) Reconcile(ctx context.Context, req ct asoManagedCluster.Status.Ready = false - cluster, err := util.GetOwnerCluster(ctx, r.Client, asoManagedCluster.ObjectMeta) + cluster, err := clusterv1beta1util.GetOwnerCluster(ctx, r.Client, asoManagedCluster.ObjectMeta) if err != nil { return ctrl.Result{}, err } @@ -218,7 +219,7 @@ func (r *AzureASOManagedClusterReconciler) Reconcile(ctx context.Context, req ct return r.reconcileNormal(ctx, asoManagedCluster, cluster) } -func (r *AzureASOManagedClusterReconciler) reconcileNormal(ctx context.Context, asoManagedCluster *infrav1.AzureASOManagedCluster, cluster *clusterv1.Cluster) (ctrl.Result, error) { +func (r *AzureASOManagedClusterReconciler) reconcileNormal(ctx context.Context, asoManagedCluster *infrav1.AzureASOManagedCluster, cluster 
*clusterv1beta1.Cluster) (ctrl.Result, error) { ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.AzureASOManagedClusterReconciler.reconcileNormal", ) @@ -235,7 +236,7 @@ func (r *AzureASOManagedClusterReconciler) reconcileNormal(ctx context.Context, return ctrl.Result{}, reconcile.TerminalError(errInvalidControlPlaneKind) } - needsPatch := controllerutil.AddFinalizer(asoManagedCluster, clusterv1.ClusterFinalizer) + needsPatch := controllerutil.AddFinalizer(asoManagedCluster, clusterv1beta1.ClusterFinalizer) needsPatch = AddBlockMoveAnnotation(asoManagedCluster) || needsPatch if needsPatch { return ctrl.Result{Requeue: true}, nil @@ -307,7 +308,7 @@ func (r *AzureASOManagedClusterReconciler) reconcileDelete(ctx context.Context, } if len(asoManagedCluster.Status.Resources) == 0 { - controllerutil.RemoveFinalizer(asoManagedCluster, clusterv1.ClusterFinalizer) + controllerutil.RemoveFinalizer(asoManagedCluster, clusterv1beta1.ClusterFinalizer) } return ctrl.Result{}, nil diff --git a/controllers/azureasomanagedcluster_controller_test.go b/controllers/azureasomanagedcluster_controller_test.go index 692992b34e9..222581b4c43 100644 --- a/controllers/azureasomanagedcluster_controller_test.go +++ b/controllers/azureasomanagedcluster_controller_test.go @@ -28,7 +28,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -71,7 +71,7 @@ func TestAzureASOManagedClusterReconcile(t *testing.T) { s := runtime.NewScheme() sb := runtime.NewSchemeBuilder( infrav1.AddToScheme, - clusterv1.AddToScheme, + clusterv1beta1.AddToScheme, ) NewGomegaWithT(t).Expect(sb.AddToScheme(s)).To(Succeed()) @@ -103,7 +103,7 @@ func TestAzureASOManagedClusterReconcile(t *testing.T) { Namespace: "ns", OwnerReferences: []metav1.OwnerReference{ { - APIVersion: clusterv1.GroupVersion.Identifier(), + APIVersion: clusterv1beta1.GroupVersion.Identifier(), Kind: "Cluster", Name: "cluster", }, @@ -123,12 +123,12 @@ func TestAzureASOManagedClusterReconcile(t *testing.T) { t.Run("adds a finalizer and block-move annotation", func(t *testing.T) { g := NewGomegaWithT(t) - cluster := &clusterv1.Cluster{ + cluster := &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", Namespace: "ns", }, - Spec: clusterv1.ClusterSpec{ + Spec: clusterv1beta1.ClusterSpec{ ControlPlaneRef: &corev1.ObjectReference{ APIVersion: "infrastructure.cluster.x-k8s.io/v1somethingelse", Kind: infrav1.AzureASOManagedControlPlaneKind, @@ -141,7 +141,7 @@ func TestAzureASOManagedClusterReconcile(t *testing.T) { Namespace: cluster.Namespace, OwnerReferences: []metav1.OwnerReference{ { - APIVersion: clusterv1.GroupVersion.Identifier(), + APIVersion: clusterv1beta1.GroupVersion.Identifier(), Kind: "Cluster", Name: cluster.Name, }, @@ -159,19 +159,19 @@ func TestAzureASOManagedClusterReconcile(t *testing.T) { g.Expect(result).To(Equal(ctrl.Result{Requeue: true})) g.Expect(c.Get(ctx, client.ObjectKeyFromObject(asoManagedCluster), asoManagedCluster)).To(Succeed()) - g.Expect(asoManagedCluster.GetFinalizers()).To(ContainElement(clusterv1.ClusterFinalizer)) + g.Expect(asoManagedCluster.GetFinalizers()).To(ContainElement(clusterv1beta1.ClusterFinalizer)) 
g.Expect(asoManagedCluster.GetAnnotations()).To(HaveKey(clusterctlv1.BlockMoveAnnotation)) }) t.Run("reconciles resources that are not ready", func(t *testing.T) { g := NewGomegaWithT(t) - cluster := &clusterv1.Cluster{ + cluster := &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", Namespace: "ns", }, - Spec: clusterv1.ClusterSpec{ + Spec: clusterv1beta1.ClusterSpec{ ControlPlaneRef: &corev1.ObjectReference{ APIVersion: infrav1.GroupVersion.Identifier(), Kind: infrav1.AzureASOManagedControlPlaneKind, @@ -184,13 +184,13 @@ func TestAzureASOManagedClusterReconcile(t *testing.T) { Namespace: cluster.Namespace, OwnerReferences: []metav1.OwnerReference{ { - APIVersion: clusterv1.GroupVersion.Identifier(), + APIVersion: clusterv1beta1.GroupVersion.Identifier(), Kind: "Cluster", Name: cluster.Name, }, }, Finalizers: []string{ - clusterv1.ClusterFinalizer, + clusterv1beta1.ClusterFinalizer, }, Annotations: map[string]string{ clusterctlv1.BlockMoveAnnotation: "true", @@ -230,12 +230,12 @@ func TestAzureASOManagedClusterReconcile(t *testing.T) { t.Run("successfully reconciles normally", func(t *testing.T) { g := NewGomegaWithT(t) - cluster := &clusterv1.Cluster{ + cluster := &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", Namespace: "ns", }, - Spec: clusterv1.ClusterSpec{ + Spec: clusterv1beta1.ClusterSpec{ ControlPlaneRef: &corev1.ObjectReference{ APIVersion: infrav1.GroupVersion.Identifier(), Kind: infrav1.AzureASOManagedControlPlaneKind, @@ -250,13 +250,13 @@ func TestAzureASOManagedClusterReconcile(t *testing.T) { Namespace: cluster.Namespace, OwnerReferences: []metav1.OwnerReference{ { - APIVersion: clusterv1.GroupVersion.Identifier(), + APIVersion: clusterv1beta1.GroupVersion.Identifier(), Kind: "Cluster", Name: cluster.Name, }, }, Finalizers: []string{ - clusterv1.ClusterFinalizer, + clusterv1beta1.ClusterFinalizer, }, Annotations: map[string]string{ clusterctlv1.BlockMoveAnnotation: "true", @@ -272,7 +272,7 @@ func TestAzureASOManagedClusterReconcile(t *testing.T) { Namespace: cluster.Namespace, }, Status: infrav1.AzureASOManagedControlPlaneStatus{ - ControlPlaneEndpoint: clusterv1.APIEndpoint{Host: "endpoint"}, + ControlPlaneEndpoint: clusterv1beta1.APIEndpoint{Host: "endpoint"}, }, } c := fakeClientBuilder(). 
@@ -300,12 +300,12 @@ func TestAzureASOManagedClusterReconcile(t *testing.T) { t.Run("successfully reconciles pause", func(t *testing.T) { g := NewGomegaWithT(t) - cluster := &clusterv1.Cluster{ + cluster := &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", Namespace: "ns", }, - Spec: clusterv1.ClusterSpec{ + Spec: clusterv1beta1.ClusterSpec{ Paused: true, }, } @@ -315,7 +315,7 @@ func TestAzureASOManagedClusterReconcile(t *testing.T) { Namespace: cluster.Namespace, OwnerReferences: []metav1.OwnerReference{ { - APIVersion: clusterv1.GroupVersion.Identifier(), + APIVersion: clusterv1beta1.GroupVersion.Identifier(), Kind: "Cluster", Name: cluster.Name, }, @@ -354,7 +354,7 @@ func TestAzureASOManagedClusterReconcile(t *testing.T) { Name: "amc", Namespace: "ns", Finalizers: []string{ - clusterv1.ClusterFinalizer, + clusterv1beta1.ClusterFinalizer, }, DeletionTimestamp: &metav1.Time{Time: time.Date(1, 0, 0, 0, 0, 0, 0, time.UTC)}, }, @@ -386,7 +386,7 @@ func TestAzureASOManagedClusterReconcile(t *testing.T) { err = c.Get(ctx, client.ObjectKeyFromObject(asoManagedCluster), asoManagedCluster) g.Expect(err).NotTo(HaveOccurred()) - g.Expect(asoManagedCluster.GetFinalizers()).To(ContainElement(clusterv1.ClusterFinalizer)) + g.Expect(asoManagedCluster.GetFinalizers()).To(ContainElement(clusterv1beta1.ClusterFinalizer)) }) t.Run("successfully reconciles finished delete", func(t *testing.T) { @@ -397,7 +397,7 @@ func TestAzureASOManagedClusterReconcile(t *testing.T) { Name: "amc", Namespace: "ns", Finalizers: []string{ - clusterv1.ClusterFinalizer, + clusterv1beta1.ClusterFinalizer, }, DeletionTimestamp: &metav1.Time{Time: time.Date(1, 0, 0, 0, 0, 0, 0, time.UTC)}, }, diff --git a/controllers/azureasomanagedcontrolplane_controller.go b/controllers/azureasomanagedcontrolplane_controller.go index 25b8843e303..6c3d886d889 100644 --- a/controllers/azureasomanagedcontrolplane_controller.go +++ b/controllers/azureasomanagedcontrolplane_controller.go @@ -30,11 +30,11 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/client-go/tools/clientcmd" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/controllers/external" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" - "sigs.k8s.io/cluster-api/util/patch" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" "sigs.k8s.io/cluster-api/util/predicates" "sigs.k8s.io/cluster-api/util/secret" ctrl "sigs.k8s.io/controller-runtime" @@ -48,6 +48,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/pkg/mutators" "sigs.k8s.io/cluster-api-provider-azure/util/tele" + clusterv1beta1util "sigs.k8s.io/cluster-api-provider-azure/util/v1beta1" ) var errInvalidClusterKind = errors.New("AzureASOManagedControlPlane cannot be used without AzureASOManagedCluster") @@ -73,7 +74,7 @@ func (r *AzureASOManagedControlPlaneReconciler) SetupWithManager(ctx context.Con WithOptions(options). For(&infrav1.AzureASOManagedControlPlane{}). WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), log, r.WatchFilterValue)). 
-		Watches(&clusterv1.Cluster{},
+		Watches(&clusterv1beta1.Cluster{},
 			handler.EnqueueRequestsFromMapFunc(clusterToAzureASOManagedControlPlane),
 			builder.WithPredicates(
 				predicates.ResourceHasFilterLabel(mgr.GetScheme(), log, r.WatchFilterValue),
@@ -112,7 +113,7 @@ func (r *AzureASOManagedControlPlaneReconciler) SetupWithManager(ctx context.Con
 }
 
 func clusterToAzureASOManagedControlPlane(_ context.Context, o client.Object) []ctrl.Request {
-	controlPlaneRef := o.(*clusterv1.Cluster).Spec.ControlPlaneRef
+	controlPlaneRef := o.(*clusterv1beta1.Cluster).Spec.ControlPlaneRef
 	if controlPlaneRef != nil &&
 		matchesASOManagedAPIGroup(controlPlaneRef.APIVersion) &&
 		controlPlaneRef.Kind == infrav1.AzureASOManagedControlPlaneKind {
@@ -123,7 +124,7 @@ func clusterToAzureASOManagedControlPlane(_ context.Context, o client.Object) []
 func (r *AzureASOManagedControlPlaneReconciler) azureASOManagedMachinePoolToAzureASOManagedControlPlane(ctx context.Context, o client.Object) []ctrl.Request {
 	asoManagedMachinePool := o.(*infrav1.AzureASOManagedMachinePool)
-	clusterName := asoManagedMachinePool.Labels[clusterv1.ClusterNameLabel]
+	clusterName := asoManagedMachinePool.Labels[clusterv1beta1.ClusterNameLabel]
 	if clusterName == "" {
 		return nil
 	}
@@ -154,7 +155,7 @@ func (r *AzureASOManagedControlPlaneReconciler) Reconcile(ctx context.Context, r
 		return ctrl.Result{}, client.IgnoreNotFound(err)
 	}
 
-	patchHelper, err := patch.NewHelper(asoManagedControlPlane, r.Client)
+	patchHelper, err := v1beta1patch.NewHelper(asoManagedControlPlane, r.Client)
 	if err != nil {
 		return ctrl.Result{}, fmt.Errorf("failed to create patch helper: %w", err)
 	}
@@ -169,7 +170,7 @@ func (r *AzureASOManagedControlPlaneReconciler) Reconcile(ctx context.Context, r
 	asoManagedControlPlane.Status.Ready = false
 	asoManagedControlPlane.Status.Initialized = false
 
-	cluster, err := util.GetOwnerCluster(ctx, r.Client, asoManagedControlPlane.ObjectMeta)
+	cluster, err := clusterv1beta1util.GetOwnerCluster(ctx, r.Client, asoManagedControlPlane.ObjectMeta)
 	if err != nil {
 		return ctrl.Result{}, err
 	}
@@ -186,7 +187,7 @@ func (r *AzureASOManagedControlPlaneReconciler) Reconcile(ctx context.Context, r
 	return r.reconcileNormal(ctx, asoManagedControlPlane, cluster)
 }
 
-func (r *AzureASOManagedControlPlaneReconciler) reconcileNormal(ctx context.Context, asoManagedControlPlane *infrav1.AzureASOManagedControlPlane, cluster *clusterv1.Cluster) (ctrl.Result, error) {
+func (r *AzureASOManagedControlPlaneReconciler) reconcileNormal(ctx context.Context, asoManagedControlPlane *infrav1.AzureASOManagedControlPlane, cluster *clusterv1beta1.Cluster) (ctrl.Result, error) {
 	ctx, log, done := tele.StartSpanWithLogger(ctx,
 		"controllers.AzureASOManagedControlPlaneReconciler.reconcileNormal",
 	)
@@ -266,7 +267,7 @@ func (r *AzureASOManagedControlPlaneReconciler) reconcileNormal(ctx context.Cont
 	return result, nil
 }
 
-func (r *AzureASOManagedControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, asoManagedControlPlane *infrav1.AzureASOManagedControlPlane, cluster *clusterv1.Cluster, managedCluster *asocontainerservicev1.ManagedCluster) (*time.Duration, error) {
+func (r *AzureASOManagedControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, asoManagedControlPlane *infrav1.AzureASOManagedControlPlane, cluster *clusterv1beta1.Cluster, managedCluster *asocontainerservicev1.ManagedCluster) (*time.Duration, error) {
 	ctx, log, done := tele.StartSpanWithLogger(ctx,
 		"controllers.AzureASOManagedControlPlaneReconciler.reconcileKubeconfig",
 	)
@@ -338,7 +339,7 @@ func (r *AzureASOManagedControlPlaneReconciler) reconcileKubeconfig(ctx context.
 			OwnerReferences: []metav1.OwnerReference{
 				*metav1.NewControllerRef(asoManagedControlPlane, infrav1.GroupVersion.WithKind(infrav1.AzureASOManagedControlPlaneKind)),
 			},
-			Labels: map[string]string{clusterv1.ClusterNameLabel: cluster.Name},
+			Labels: map[string]string{clusterv1beta1.ClusterNameLabel: cluster.Name},
 		},
 		Data: map[string][]byte{
 			secret.KubeconfigDataName: kubeconfigData,
@@ -392,18 +393,18 @@ func (r *AzureASOManagedControlPlaneReconciler) reconcileDelete(ctx context.Cont
 	return ctrl.Result{}, nil
 }
 
-func getControlPlaneEndpoint(managedCluster *asocontainerservicev1.ManagedCluster) clusterv1.APIEndpoint {
+func getControlPlaneEndpoint(managedCluster *asocontainerservicev1.ManagedCluster) clusterv1beta1.APIEndpoint {
 	if managedCluster.Status.PrivateFQDN != nil {
-		return clusterv1.APIEndpoint{
+		return clusterv1beta1.APIEndpoint{
 			Host: *managedCluster.Status.PrivateFQDN,
 			Port: 443,
 		}
 	}
 	if managedCluster.Status.Fqdn != nil {
-		return clusterv1.APIEndpoint{
+		return clusterv1beta1.APIEndpoint{
 			Host: *managedCluster.Status.Fqdn,
 			Port: 443,
 		}
 	}
-	return clusterv1.APIEndpoint{}
+	return clusterv1beta1.APIEndpoint{}
 }
diff --git a/controllers/azureasomanagedcontrolplane_controller_test.go b/controllers/azureasomanagedcontrolplane_controller_test.go
index 871d9c9d193..e87b30ccc3e 100644
--- a/controllers/azureasomanagedcontrolplane_controller_test.go
+++ b/controllers/azureasomanagedcontrolplane_controller_test.go
@@ -36,7 +36,7 @@ import (
 	"k8s.io/client-go/tools/clientcmd"
 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
 	"k8s.io/utils/ptr"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
 	"sigs.k8s.io/cluster-api/util/secret"
 	ctrl "sigs.k8s.io/controller-runtime"
@@ -52,7 +52,7 @@ func TestAzureASOManagedControlPlaneReconcile(t *testing.T) {
 	s := runtime.NewScheme()
 	sb := runtime.NewSchemeBuilder(
 		infrav1.AddToScheme,
-		clusterv1.AddToScheme,
+		clusterv1beta1.AddToScheme,
 		asocontainerservicev1.AddToScheme,
 		corev1.AddToScheme,
 	)
@@ -85,7 +85,7 @@ func TestAzureASOManagedControlPlaneReconcile(t *testing.T) {
 			Namespace: "ns",
 			OwnerReferences: []metav1.OwnerReference{
 				{
-					APIVersion: clusterv1.GroupVersion.Identifier(),
+					APIVersion: clusterv1beta1.GroupVersion.Identifier(),
 					Kind:       "Cluster",
 					Name:       "cluster",
 				},
@@ -105,12 +105,12 @@ func TestAzureASOManagedControlPlaneReconcile(t *testing.T) {
 	t.Run("adds a finalizer and block-move annotation", func(t *testing.T) {
 		g := NewGomegaWithT(t)
 
-		cluster := &clusterv1.Cluster{
+		cluster := &clusterv1beta1.Cluster{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      "cluster",
 				Namespace: "ns",
 			},
-			Spec: clusterv1.ClusterSpec{
+			Spec: clusterv1beta1.ClusterSpec{
 				InfrastructureRef: &corev1.ObjectReference{
 					APIVersion: "infrastructure.cluster.x-k8s.io/v1somethingelse",
 					Kind:       infrav1.AzureASOManagedClusterKind,
@@ -123,7 +123,7 @@ func TestAzureASOManagedControlPlaneReconcile(t *testing.T) {
 				Namespace: cluster.Namespace,
 				OwnerReferences: []metav1.OwnerReference{
 					{
-						APIVersion: clusterv1.GroupVersion.Identifier(),
+						APIVersion: clusterv1beta1.GroupVersion.Identifier(),
 						Kind:       "Cluster",
 						Name:       cluster.Name,
 					},
@@ -148,12 +148,12 @@ func TestAzureASOManagedControlPlaneReconcile(t *testing.T) {
 	t.Run("reconciles resources that are not ready", func(t *testing.T) {
 		g := NewGomegaWithT(t)
 
-		cluster := &clusterv1.Cluster{
+		cluster := &clusterv1beta1.Cluster{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      "cluster",
 				Namespace: "ns",
 			},
-			Spec: clusterv1.ClusterSpec{
+			Spec: clusterv1beta1.ClusterSpec{
 				InfrastructureRef: &corev1.ObjectReference{
 					APIVersion: infrav1.GroupVersion.Identifier(),
 					Kind:       infrav1.AzureASOManagedClusterKind,
@@ -166,7 +166,7 @@ func TestAzureASOManagedControlPlaneReconcile(t *testing.T) {
 				Namespace: cluster.Namespace,
 				OwnerReferences: []metav1.OwnerReference{
 					{
-						APIVersion: clusterv1.GroupVersion.Identifier(),
+						APIVersion: clusterv1beta1.GroupVersion.Identifier(),
 						Kind:       "Cluster",
 						Name:       cluster.Name,
 					},
@@ -225,12 +225,12 @@ func TestAzureASOManagedControlPlaneReconcile(t *testing.T) {
 	t.Run("successfully reconciles normally", func(t *testing.T) {
 		g := NewGomegaWithT(t)
 
-		cluster := &clusterv1.Cluster{
+		cluster := &clusterv1beta1.Cluster{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      "cluster",
 				Namespace: "ns",
 			},
-			Spec: clusterv1.ClusterSpec{
+			Spec: clusterv1beta1.ClusterSpec{
 				InfrastructureRef: &corev1.ObjectReference{
 					APIVersion: infrav1.GroupVersion.Identifier(),
 					Kind:       infrav1.AzureASOManagedClusterKind,
@@ -272,7 +272,7 @@ func TestAzureASOManagedControlPlaneReconcile(t *testing.T) {
 				Namespace: cluster.Namespace,
 				OwnerReferences: []metav1.OwnerReference{
 					{
-						APIVersion: clusterv1.GroupVersion.Identifier(),
+						APIVersion: clusterv1beta1.GroupVersion.Identifier(),
 						Kind:       "Cluster",
 						Name:       cluster.Name,
 					},
@@ -337,12 +337,12 @@ func TestAzureASOManagedControlPlaneReconcile(t *testing.T) {
 	t.Run("successfully reconciles a kubeconfig with a token", func(t *testing.T) {
 		g := NewGomegaWithT(t)
 
-		cluster := &clusterv1.Cluster{
+		cluster := &clusterv1beta1.Cluster{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      "cluster",
 				Namespace: "ns",
 			},
-			Spec: clusterv1.ClusterSpec{
+			Spec: clusterv1beta1.ClusterSpec{
 				InfrastructureRef: &corev1.ObjectReference{
 					APIVersion: infrav1.GroupVersion.Identifier(),
 					Kind:       infrav1.AzureASOManagedClusterKind,
@@ -398,7 +398,7 @@ func TestAzureASOManagedControlPlaneReconcile(t *testing.T) {
 				Namespace: cluster.Namespace,
 				OwnerReferences: []metav1.OwnerReference{
 					{
-						APIVersion: clusterv1.GroupVersion.Identifier(),
+						APIVersion: clusterv1beta1.GroupVersion.Identifier(),
 						Kind:       "Cluster",
 						Name:       cluster.Name,
 					},
@@ -476,12 +476,12 @@ func TestAzureASOManagedControlPlaneReconcile(t *testing.T) {
 	t.Run("successfully reconciles a kubeconfig with a token that has expired", func(t *testing.T) {
 		g := NewGomegaWithT(t)
 
-		cluster := &clusterv1.Cluster{
+		cluster := &clusterv1beta1.Cluster{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      "cluster",
 				Namespace: "ns",
 			},
-			Spec: clusterv1.ClusterSpec{
+			Spec: clusterv1beta1.ClusterSpec{
 				InfrastructureRef: &corev1.ObjectReference{
 					APIVersion: infrav1.GroupVersion.Identifier(),
 					Kind:       infrav1.AzureASOManagedClusterKind,
@@ -537,7 +537,7 @@ func TestAzureASOManagedControlPlaneReconcile(t *testing.T) {
 				Namespace: cluster.Namespace,
 				OwnerReferences: []metav1.OwnerReference{
 					{
-						APIVersion: clusterv1.GroupVersion.Identifier(),
+						APIVersion: clusterv1beta1.GroupVersion.Identifier(),
 						Kind:       "Cluster",
 						Name:       cluster.Name,
 					},
@@ -614,12 +614,12 @@ func TestAzureASOManagedControlPlaneReconcile(t *testing.T) {
 	t.Run("successfully reconciles pause", func(t *testing.T) {
 		g := NewGomegaWithT(t)
 
-		cluster := &clusterv1.Cluster{
+		cluster := &clusterv1beta1.Cluster{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      "cluster",
 				Namespace: "ns",
 			},
-			Spec: clusterv1.ClusterSpec{
+			Spec: clusterv1beta1.ClusterSpec{
 				Paused: true,
 			},
 		}
@@ -629,7 +629,7 @@ func TestAzureASOManagedControlPlaneReconcile(t *testing.T) {
 				Namespace: cluster.Namespace,
 				OwnerReferences: []metav1.OwnerReference{
 					{
-						APIVersion: clusterv1.GroupVersion.Identifier(),
+						APIVersion: clusterv1beta1.GroupVersion.Identifier(),
 						Kind:       "Cluster",
 						Name:       cluster.Name,
 					},
@@ -699,12 +699,12 @@ func TestGetControlPlaneEndpoint(t *testing.T) {
 	tests := []struct {
 		name           string
 		managedCluster *asocontainerservicev1.ManagedCluster
-		expected       clusterv1.APIEndpoint
+		expected       clusterv1beta1.APIEndpoint
 	}{
 		{
 			name:           "empty",
 			managedCluster: &asocontainerservicev1.ManagedCluster{},
-			expected:       clusterv1.APIEndpoint{},
+			expected:       clusterv1beta1.APIEndpoint{},
 		},
 		{
 			name: "public fqdn",
@@ -713,7 +713,7 @@ func TestGetControlPlaneEndpoint(t *testing.T) {
 					Fqdn: ptr.To("fqdn"),
 				},
 			},
-			expected: clusterv1.APIEndpoint{
+			expected: clusterv1beta1.APIEndpoint{
 				Host: "fqdn",
 				Port: 443,
 			},
@@ -725,7 +725,7 @@ func TestGetControlPlaneEndpoint(t *testing.T) {
 					PrivateFQDN: ptr.To("fqdn"),
 				},
 			},
-			expected: clusterv1.APIEndpoint{
+			expected: clusterv1beta1.APIEndpoint{
 				Host: "fqdn",
 				Port: 443,
 			},
@@ -738,7 +738,7 @@ func TestGetControlPlaneEndpoint(t *testing.T) {
 					Fqdn: ptr.To("public"),
 				},
 			},
-			expected: clusterv1.APIEndpoint{
+			expected: clusterv1beta1.APIEndpoint{
 				Host: "private",
 				Port: 443,
 			},
diff --git a/controllers/azureasomanagedmachinepool_controller.go b/controllers/azureasomanagedmachinepool_controller.go
index 87156d77d12..3c2774cd668 100644
--- a/controllers/azureasomanagedmachinepool_controller.go
+++ b/controllers/azureasomanagedmachinepool_controller.go
@@ -27,13 +27,11 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/validation"
 	"k8s.io/utils/ptr"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	"sigs.k8s.io/cluster-api/controllers/external"
-	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 	utilexp "sigs.k8s.io/cluster-api/exp/util"
 	"sigs.k8s.io/cluster-api/util"
-	"sigs.k8s.io/cluster-api/util/annotations"
-	"sigs.k8s.io/cluster-api/util/patch"
+	v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch"
 	"sigs.k8s.io/cluster-api/util/predicates"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/builder"
@@ -46,6 +44,7 @@ import (
 	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
 	"sigs.k8s.io/cluster-api-provider-azure/pkg/mutators"
 	"sigs.k8s.io/cluster-api-provider-azure/util/tele"
+	clusterv1beta1util "sigs.k8s.io/cluster-api-provider-azure/util/v1beta1"
 )
 
 // AzureASOManagedMachinePoolReconciler reconciles a AzureASOManagedMachinePool object.
@@ -80,7 +79,7 @@ func (r *AzureASOManagedMachinePoolReconciler) SetupWithManager(ctx context.Cont
 		For(&infrav1.AzureASOManagedMachinePool{}).
 		WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), log, r.WatchFilterValue)).
 		Watches(
-			&clusterv1.Cluster{},
+			&clusterv1beta1.Cluster{},
 			handler.EnqueueRequestsFromMapFunc(clusterToAzureASOManagedMachinePools),
 			builder.WithPredicates(
 				predicates.ResourceHasFilterLabel(mgr.GetScheme(), log, r.WatchFilterValue),
@@ -91,7 +90,7 @@ func (r *AzureASOManagedMachinePoolReconciler) SetupWithManager(ctx context.Cont
 			),
 		).
 		Watches(
-			&expv1.MachinePool{},
+			&clusterv1beta1.MachinePool{},
 			handler.EnqueueRequestsFromMapFunc(utilexp.MachinePoolToInfrastructureMapFunc(ctx,
 				infrav1.GroupVersion.WithKind(infrav1.AzureASOManagedMachinePoolKind)),
 			),
@@ -143,7 +142,7 @@ func (r *AzureASOManagedMachinePoolReconciler) Reconcile(ctx context.Context, re
 		return ctrl.Result{}, client.IgnoreNotFound(err)
 	}
 
-	patchHelper, err := patch.NewHelper(asoManagedMachinePool, r.Client)
+	patchHelper, err := v1beta1patch.NewHelper(asoManagedMachinePool, r.Client)
 	if err != nil {
 		return ctrl.Result{}, fmt.Errorf("failed to create patch helper: %w", err)
 	}
@@ -157,7 +156,7 @@ func (r *AzureASOManagedMachinePoolReconciler) Reconcile(ctx context.Context, re
 	asoManagedMachinePool.Status.Ready = false
 
-	machinePool, err := utilexp.GetOwnerMachinePool(ctx, r.Client, asoManagedMachinePool.ObjectMeta)
+	machinePool, err := clusterv1beta1util.GetOwnerMachinePool(ctx, r.Client, asoManagedMachinePool.ObjectMeta)
 	if err != nil {
 		return ctrl.Result{}, err
 	}
@@ -176,12 +175,12 @@ func (r *AzureASOManagedMachinePoolReconciler) Reconcile(ctx context.Context, re
 	}()
 
-	cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machinePool.ObjectMeta)
+	cluster, err := clusterv1beta1util.GetClusterFromMetadata(ctx, r.Client, machinePool.ObjectMeta)
 	if err != nil {
 		return ctrl.Result{}, fmt.Errorf("AzureASOManagedMachinePool owner MachinePool is missing cluster label or cluster does not exist: %w", err)
 	}
 	if cluster == nil {
-		log.Info(fmt.Sprintf("Waiting for MachinePool controller to set %s label on MachinePool", clusterv1.ClusterNameLabel))
+		log.Info(fmt.Sprintf("Waiting for MachinePool controller to set %s label on MachinePool", clusterv1beta1.ClusterNameLabel))
 		return ctrl.Result{}, nil
 	}
 	if cluster.Spec.ControlPlaneRef == nil ||
@@ -190,7 +189,7 @@ func (r *AzureASOManagedMachinePoolReconciler) Reconcile(ctx context.Context, re
 		return ctrl.Result{}, reconcile.TerminalError(fmt.Errorf("AzureASOManagedMachinePool cannot be used without AzureASOManagedControlPlane"))
 	}
 
-	if annotations.IsPaused(cluster, asoManagedMachinePool) {
+	if clusterv1beta1util.IsPaused(cluster, asoManagedMachinePool) {
 		return r.reconcilePaused(ctx, asoManagedMachinePool)
 	}
 
@@ -201,14 +200,14 @@ func (r *AzureASOManagedMachinePoolReconciler) Reconcile(ctx context.Context, re
 	return r.reconcileNormal(ctx, asoManagedMachinePool, machinePool, cluster)
 }
 
-func (r *AzureASOManagedMachinePoolReconciler) reconcileNormal(ctx context.Context, asoManagedMachinePool *infrav1.AzureASOManagedMachinePool, machinePool *expv1.MachinePool, cluster *clusterv1.Cluster) (ctrl.Result, error) {
+func (r *AzureASOManagedMachinePoolReconciler) reconcileNormal(ctx context.Context, asoManagedMachinePool *infrav1.AzureASOManagedMachinePool, machinePool *clusterv1beta1.MachinePool, cluster *clusterv1beta1.Cluster) (ctrl.Result, error) {
 	ctx, log, done := tele.StartSpanWithLogger(ctx,
 		"controllers.AzureASOManagedMachinePoolReconciler.reconcileNormal",
 	)
 	defer done()
 	log.V(4).Info("reconciling normally")
 
-	needsPatch := controllerutil.AddFinalizer(asoManagedMachinePool, clusterv1.ClusterFinalizer)
+	needsPatch := controllerutil.AddFinalizer(asoManagedMachinePool, clusterv1beta1.ClusterFinalizer)
 	needsPatch = AddBlockMoveAnnotation(asoManagedMachinePool) || needsPatch
 	if needsPatch {
 		return ctrl.Result{Requeue: true}, nil
@@ -281,7 +280,7 @@ func (r *AzureASOManagedMachinePoolReconciler) reconcileNormal(ctx context.Conte
 	slices.Sort(providerIDs)
 	asoManagedMachinePool.Spec.ProviderIDList = providerIDs
 	asoManagedMachinePool.Status.Replicas = int32(ptr.Deref(agentPool.Status.Count, 0))
-	if _, autoscaling := machinePool.Annotations[clusterv1.ReplicasManagedByAnnotation]; autoscaling {
+	if _, autoscaling := machinePool.Annotations[clusterv1beta1.ReplicasManagedByAnnotation]; autoscaling {
 		machinePool.Spec.Replicas = &asoManagedMachinePool.Status.Replicas
 	}
@@ -323,7 +322,7 @@ func (r *AzureASOManagedMachinePoolReconciler) reconcilePaused(ctx context.Conte
 	return ctrl.Result{}, nil
 }
 
-func (r *AzureASOManagedMachinePoolReconciler) reconcileDelete(ctx context.Context, asoManagedMachinePool *infrav1.AzureASOManagedMachinePool, cluster *clusterv1.Cluster) (ctrl.Result, error) {
+func (r *AzureASOManagedMachinePoolReconciler) reconcileDelete(ctx context.Context, asoManagedMachinePool *infrav1.AzureASOManagedMachinePool, cluster *clusterv1beta1.Cluster) (ctrl.Result, error) {
 	ctx, log, done := tele.StartSpanWithLogger(ctx,
 		"controllers.AzureASOManagedMachinePoolReconciler.reconcileDelete",
 	)
@@ -343,6 +342,6 @@ func (r *AzureASOManagedMachinePoolReconciler) reconcileDelete(ctx context.Conte
 		}
 	}
 
-	controllerutil.RemoveFinalizer(asoManagedMachinePool, clusterv1.ClusterFinalizer)
+	controllerutil.RemoveFinalizer(asoManagedMachinePool, clusterv1beta1.ClusterFinalizer)
 	return ctrl.Result{}, nil
 }
diff --git a/controllers/azureasomanagedmachinepool_controller_test.go b/controllers/azureasomanagedmachinepool_controller_test.go
index 7680a533732..fc512781ec1 100644
--- a/controllers/azureasomanagedmachinepool_controller_test.go
+++ b/controllers/azureasomanagedmachinepool_controller_test.go
@@ -31,9 +31,8 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/utils/ptr"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
-	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake"
@@ -58,8 +57,7 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) {
 	s := runtime.NewScheme()
 	sb := runtime.NewSchemeBuilder(
 		infrav1.AddToScheme,
-		clusterv1.AddToScheme,
-		expv1.AddToScheme,
+		clusterv1beta1.AddToScheme,
 		asocontainerservicev1.AddToScheme,
 	)
 	NewGomegaWithT(t).Expect(sb.AddToScheme(s)).To(Succeed())
@@ -91,7 +89,7 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) {
 			Namespace: "ns",
 			OwnerReferences: []metav1.OwnerReference{
 				{
-					APIVersion: expv1.GroupVersion.Identifier(),
+					APIVersion: clusterv1beta1.GroupVersion.Identifier(),
 					Kind:       "MachinePool",
 					Name:       "mp",
 				},
@@ -119,19 +117,19 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) {
 				Namespace: "ns",
 				OwnerReferences: []metav1.OwnerReference{
 					{
-						APIVersion: expv1.GroupVersion.Identifier(),
+						APIVersion: clusterv1beta1.GroupVersion.Identifier(),
 						Kind:       "MachinePool",
 						Name:       "mp",
 					},
 				},
 			},
 		}
-		machinePool := &expv1.MachinePool{
+		machinePool := &clusterv1beta1.MachinePool{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      "mp",
 				Namespace: asoManagedMachinePool.Namespace,
 				Labels: map[string]string{
-					clusterv1.ClusterNameLabel: "cluster",
+					clusterv1beta1.ClusterNameLabel: "cluster",
 				},
 			},
 		}
@@ -150,12 +148,12 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) {
 	t.Run("adds a finalizer and block-move annotation", func(t *testing.T) {
 		g := NewGomegaWithT(t)
 
-		cluster := &clusterv1.Cluster{
+		cluster := &clusterv1beta1.Cluster{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      "cluster",
 				Namespace: "ns",
 			},
-			Spec: clusterv1.ClusterSpec{
+			Spec: clusterv1beta1.ClusterSpec{
 				ControlPlaneRef: &corev1.ObjectReference{
 					APIVersion: "infrastructure.cluster.x-k8s.io/v1somethingelse",
 					Kind:       infrav1.AzureASOManagedControlPlaneKind,
@@ -168,19 +166,19 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) {
 				Namespace: cluster.Namespace,
 				OwnerReferences: []metav1.OwnerReference{
 					{
-						APIVersion: expv1.GroupVersion.Identifier(),
+						APIVersion: clusterv1beta1.GroupVersion.Identifier(),
 						Kind:       "MachinePool",
 						Name:       "mp",
 					},
 				},
 			},
 		}
-		machinePool := &expv1.MachinePool{
+		machinePool := &clusterv1beta1.MachinePool{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      "mp",
 				Namespace: cluster.Namespace,
 				Labels: map[string]string{
-					clusterv1.ClusterNameLabel: "cluster",
+					clusterv1beta1.ClusterNameLabel: "cluster",
 				},
 			},
 		}
@@ -195,19 +193,19 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) {
 		g.Expect(result).To(Equal(ctrl.Result{Requeue: true}))
 
 		g.Expect(c.Get(ctx, client.ObjectKeyFromObject(asoManagedMachinePool), asoManagedMachinePool)).To(Succeed())
-		g.Expect(asoManagedMachinePool.GetFinalizers()).To(ContainElement(clusterv1.ClusterFinalizer))
+		g.Expect(asoManagedMachinePool.GetFinalizers()).To(ContainElement(clusterv1beta1.ClusterFinalizer))
 		g.Expect(asoManagedMachinePool.GetAnnotations()).To(HaveKey(clusterctlv1.BlockMoveAnnotation))
 	})
 
 	t.Run("reconciles resources that are not ready", func(t *testing.T) {
 		g := NewGomegaWithT(t)
 
-		cluster := &clusterv1.Cluster{
+		cluster := &clusterv1beta1.Cluster{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      "cluster",
 				Namespace: "ns",
 			},
-			Spec: clusterv1.ClusterSpec{
+			Spec: clusterv1beta1.ClusterSpec{
 				ControlPlaneRef: &corev1.ObjectReference{
 					APIVersion: infrav1.GroupVersion.Identifier(),
 					Kind:       infrav1.AzureASOManagedControlPlaneKind,
@@ -220,13 +218,13 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) {
 				Namespace: cluster.Namespace,
 				OwnerReferences: []metav1.OwnerReference{
 					{
-						APIVersion: expv1.GroupVersion.Identifier(),
+						APIVersion: clusterv1beta1.GroupVersion.Identifier(),
 						Kind:       "MachinePool",
 						Name:       "mp",
 					},
 				},
 				Finalizers: []string{
-					clusterv1.ClusterFinalizer,
+					clusterv1beta1.ClusterFinalizer,
 				},
 				Annotations: map[string]string{
 					clusterctlv1.BlockMoveAnnotation: "true",
@@ -249,12 +247,12 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) {
 				Ready: true,
 			},
 		}
-		machinePool := &expv1.MachinePool{
+		machinePool := &clusterv1beta1.MachinePool{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      "mp",
 				Namespace: cluster.Namespace,
 				Labels: map[string]string{
-					clusterv1.ClusterNameLabel: "cluster",
+					clusterv1beta1.ClusterNameLabel: "cluster",
 				},
 			},
 		}
@@ -288,12 +286,12 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) {
 	t.Run("successfully reconciles normally", func(t *testing.T) {
 		g := NewGomegaWithT(t)
 
-		cluster := &clusterv1.Cluster{
+		cluster := &clusterv1beta1.Cluster{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      "cluster",
 				Namespace: "ns",
 			},
-			Spec: clusterv1.ClusterSpec{
+			Spec: clusterv1beta1.ClusterSpec{
 				ControlPlaneRef: &corev1.ObjectReference{
 					APIVersion: infrav1.GroupVersion.Identifier(),
 					Kind:       infrav1.AzureASOManagedControlPlaneKind,
@@ -330,13 +328,13 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) {
 				Namespace: cluster.Namespace,
 				OwnerReferences: []metav1.OwnerReference{
 					{
-						APIVersion: expv1.GroupVersion.Identifier(),
+						APIVersion: clusterv1beta1.GroupVersion.Identifier(),
 						Kind:       "MachinePool",
 						Name:       "mp",
 					},
 				},
 				Finalizers: []string{
-					clusterv1.ClusterFinalizer,
+					clusterv1beta1.ClusterFinalizer,
 				},
 				Annotations: map[string]string{
 					clusterctlv1.BlockMoveAnnotation: "true",
@@ -355,15 +353,15 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) {
 				Ready: false,
 			},
 		}
-		machinePool := &expv1.MachinePool{
+		machinePool := &clusterv1beta1.MachinePool{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      "mp",
 				Namespace: cluster.Namespace,
 				Labels: map[string]string{
-					clusterv1.ClusterNameLabel: "cluster",
+					clusterv1beta1.ClusterNameLabel: "cluster",
 				},
 			},
-			Spec: expv1.MachinePoolSpec{
+			Spec: clusterv1beta1.MachinePoolSpec{
 				Replicas: ptr.To[int32](1),
 			},
 		}
@@ -430,12 +428,12 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) {
 	t.Run("successfully reconciles normally with autoscaling", func(t *testing.T) {
 		g := NewGomegaWithT(t)
 
-		cluster := &clusterv1.Cluster{
+		cluster := &clusterv1beta1.Cluster{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      "cluster",
 				Namespace: "ns",
 			},
-			Spec: clusterv1.ClusterSpec{
+			Spec: clusterv1beta1.ClusterSpec{
 				ControlPlaneRef: &corev1.ObjectReference{
 					APIVersion: infrav1.GroupVersion.Identifier(),
 					Kind:       infrav1.AzureASOManagedControlPlaneKind,
@@ -473,13 +471,13 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) {
 				Namespace: cluster.Namespace,
 				OwnerReferences: []metav1.OwnerReference{
 					{
-						APIVersion: expv1.GroupVersion.Identifier(),
+						APIVersion: clusterv1beta1.GroupVersion.Identifier(),
 						Kind:       "MachinePool",
 						Name:       "mp",
 					},
 				},
 				Finalizers: []string{
-					clusterv1.ClusterFinalizer,
+					clusterv1beta1.ClusterFinalizer,
 				},
 				Annotations: map[string]string{
 					clusterctlv1.BlockMoveAnnotation: "true",
@@ -498,12 +496,12 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) {
 				Ready: false,
 			},
 		}
-		machinePool := &expv1.MachinePool{
+		machinePool := &clusterv1beta1.MachinePool{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      "mp",
 				Namespace: cluster.Namespace,
 				Labels: map[string]string{
-					clusterv1.ClusterNameLabel: "cluster",
+					clusterv1beta1.ClusterNameLabel: "cluster",
 				},
 			},
 		}
@@ -540,12 +538,12 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) {
 	t.Run("successfully reconciles pause", func(t *testing.T) {
 		g := NewGomegaWithT(t)
 
-		cluster := &clusterv1.Cluster{
+		cluster := &clusterv1beta1.Cluster{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      "cluster",
 				Namespace: "ns",
 			},
-			Spec: clusterv1.ClusterSpec{
+			Spec: clusterv1beta1.ClusterSpec{
 				Paused: true,
 				ControlPlaneRef: &corev1.ObjectReference{
 					APIVersion: infrav1.GroupVersion.Identifier(),
@@ -559,7 +557,7 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) {
 				Namespace: cluster.Namespace,
 				OwnerReferences: []metav1.OwnerReference{
 					{
-						APIVersion: expv1.GroupVersion.Identifier(),
+						APIVersion: clusterv1beta1.GroupVersion.Identifier(),
 						Kind:       "MachinePool",
 						Name:       "mp",
 					},
@@ -569,12 +567,12 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) {
 				},
 			},
 		}
-		machinePool := &expv1.MachinePool{
+		machinePool := &clusterv1beta1.MachinePool{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      "mp",
 				Namespace: cluster.Namespace,
 				Labels: map[string]string{
-					clusterv1.ClusterNameLabel: "cluster",
+					clusterv1beta1.ClusterNameLabel: "cluster",
 				},
 			},
 		}
@@ -602,12 +600,12 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) {
 	t.Run("successfully reconciles delete", func(t *testing.T) {
 		g := NewGomegaWithT(t)
 
-		cluster := &clusterv1.Cluster{
+		cluster := &clusterv1beta1.Cluster{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      "cluster",
 				Namespace: "ns",
 			},
-			Spec: clusterv1.ClusterSpec{
+			Spec: clusterv1beta1.ClusterSpec{
 				ControlPlaneRef: &corev1.ObjectReference{
 					APIVersion: infrav1.GroupVersion.Identifier(),
 					Kind:       infrav1.AzureASOManagedControlPlaneKind,
@@ -620,23 +618,23 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) {
 				Namespace: cluster.Namespace,
 				OwnerReferences: []metav1.OwnerReference{
 					{
-						APIVersion: expv1.GroupVersion.Identifier(),
+						APIVersion: clusterv1beta1.GroupVersion.Identifier(),
 						Kind:       "MachinePool",
 						Name:       "mp",
 					},
 				},
 				DeletionTimestamp: &metav1.Time{Time: time.Date(1, 0, 0, 0, 0, 0, 0, time.UTC)},
 				Finalizers: []string{
-					clusterv1.ClusterFinalizer,
+					clusterv1beta1.ClusterFinalizer,
 				},
 			},
 		}
-		machinePool := &expv1.MachinePool{
+		machinePool := &clusterv1beta1.MachinePool{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      "mp",
 				Namespace: cluster.Namespace,
 				Labels: map[string]string{
-					clusterv1.ClusterNameLabel: "cluster",
+					clusterv1beta1.ClusterNameLabel: "cluster",
 				},
 			},
 		}
diff --git a/controllers/azurecluster_controller.go b/controllers/azurecluster_controller.go
index fd76ad655e9..3ac96127841 100644
--- a/controllers/azurecluster_controller.go
+++ b/controllers/azurecluster_controller.go
@@ -24,10 +24,9 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/client-go/tools/record"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	"sigs.k8s.io/cluster-api/util"
-	"sigs.k8s.io/cluster-api/util/annotations"
-	"sigs.k8s.io/cluster-api/util/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 	"sigs.k8s.io/cluster-api/util/predicates"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/builder"
@@ -42,6 +41,7 @@ import (
 	"sigs.k8s.io/cluster-api-provider-azure/pkg/coalescing"
 	"sigs.k8s.io/cluster-api-provider-azure/util/reconciler"
 	"sigs.k8s.io/cluster-api-provider-azure/util/tele"
+	clusterv1beta1util "sigs.k8s.io/cluster-api-provider-azure/util/v1beta1"
 )
 
 // AzureClusterReconciler reconciles an AzureCluster object.
@@ -89,9 +89,9 @@ func (acr *AzureClusterReconciler) SetupWithManager(ctx context.Context, mgr ctr
 		For(&infrav1.AzureCluster{}).
 		WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), log, acr.WatchFilterValue)).
 		WithEventFilter(predicates.ResourceIsNotExternallyManaged(mgr.GetScheme(), log)).
-		// Add a watch on clusterv1.Cluster object for pause/unpause notifications.
+		// Add a watch on clusterv1beta1.Cluster object for pause/unpause notifications.
 		Watches(
-			&clusterv1.Cluster{},
+			&clusterv1beta1.Cluster{},
 			handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, infrav1.GroupVersion.WithKind(infrav1.AzureClusterKind), mgr.GetClient(), &infrav1.AzureCluster{})),
 			builder.WithPredicates(
 				ClusterUpdatePauseChange(log),
@@ -139,7 +139,7 @@ func (acr *AzureClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 	}
 
 	// Fetch the Cluster.
-	cluster, err := util.GetOwnerCluster(ctx, acr.Client, azureCluster.ObjectMeta)
+	cluster, err := clusterv1beta1util.GetOwnerCluster(ctx, acr.Client, azureCluster.ObjectMeta)
 	if err != nil {
 		return reconcile.Result{}, err
 	}
@@ -173,7 +173,7 @@ func (acr *AzureClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 	}()
 
 	// Return early if the object or Cluster is paused.
-	if annotations.IsPaused(cluster, azureCluster) {
+	if clusterv1beta1util.IsPaused(cluster, azureCluster) {
 		acr.Recorder.Eventf(azureCluster, corev1.EventTypeNormal, "ClusterPaused", "AzureCluster or linked Cluster is marked as paused. Won't reconcile normally")
 		log.Info("AzureCluster or linked Cluster is marked as paused. Won't reconcile normally")
 		return acr.reconcilePause(ctx, clusterScope)
@@ -227,7 +227,7 @@ func (acr *AzureClusterReconciler) reconcileNormal(ctx context.Context, clusterS
 		if reconcileError.IsTerminal() {
 			acr.Recorder.Eventf(clusterScope.AzureCluster, corev1.EventTypeWarning, "ReconcileError", errors.Wrapf(err, "failed to reconcile AzureCluster").Error())
 			log.Error(err, "failed to reconcile AzureCluster", "name", clusterScope.ClusterName())
-			conditions.MarkFalse(azureCluster, infrav1.NetworkInfrastructureReadyCondition, infrav1.FailedReason, clusterv1.ConditionSeverityError, "")
+			v1beta1conditions.MarkFalse(azureCluster, infrav1.NetworkInfrastructureReadyCondition, infrav1.FailedReason, clusterv1beta1.ConditionSeverityError, "")
 			return reconcile.Result{}, nil
 		}
 		if reconcileError.IsTransient() {
@@ -242,7 +242,7 @@ func (acr *AzureClusterReconciler) reconcileNormal(ctx context.Context, clusterS
 		wrappedErr := errors.Wrap(err, "failed to reconcile cluster services")
 		acr.Recorder.Eventf(azureCluster, corev1.EventTypeWarning, "ClusterReconcilerNormalFailed", "%s", wrappedErr.Error())
-		conditions.MarkFalse(azureCluster, infrav1.NetworkInfrastructureReadyCondition, infrav1.FailedReason, clusterv1.ConditionSeverityError, "%s", wrappedErr.Error())
+		v1beta1conditions.MarkFalse(azureCluster, infrav1.NetworkInfrastructureReadyCondition, infrav1.FailedReason, clusterv1beta1.ConditionSeverityError, "%s", wrappedErr.Error())
 		return reconcile.Result{}, wrappedErr
 	}
@@ -256,17 +256,17 @@ func (acr *AzureClusterReconciler) reconcileNormal(ctx context.Context, clusterS
 		}
 	} else {
 		if azureCluster.Spec.ControlPlaneEndpoint.Host == "" {
-			conditions.MarkFalse(azureCluster, infrav1.NetworkInfrastructureReadyCondition, "ExternallyManagedControlPlane", clusterv1.ConditionSeverityInfo, "Waiting for the Control Plane host")
+			v1beta1conditions.MarkFalse(azureCluster, infrav1.NetworkInfrastructureReadyCondition, "ExternallyManagedControlPlane", clusterv1beta1.ConditionSeverityInfo, "Waiting for the Control Plane host")
 			return reconcile.Result{}, nil
 		} else if azureCluster.Spec.ControlPlaneEndpoint.Port == 0 {
-			conditions.MarkFalse(azureCluster, infrav1.NetworkInfrastructureReadyCondition, "ExternallyManagedControlPlane", clusterv1.ConditionSeverityInfo, "Waiting for the Control Plane port")
+			v1beta1conditions.MarkFalse(azureCluster, infrav1.NetworkInfrastructureReadyCondition, "ExternallyManagedControlPlane", clusterv1beta1.ConditionSeverityInfo, "Waiting for the Control Plane port")
 			return reconcile.Result{}, nil
 		}
 	}
 
 	// No errors, so mark us ready so the Cluster API Cluster Controller can pull it
 	azureCluster.Status.Ready = true
-	conditions.MarkTrue(azureCluster, infrav1.NetworkInfrastructureReadyCondition)
+	v1beta1conditions.MarkTrue(azureCluster, infrav1.NetworkInfrastructureReadyCondition)
 
 	return reconcile.Result{}, nil
 }
@@ -319,7 +319,7 @@ func (acr *AzureClusterReconciler) reconcileDelete(ctx context.Context, clusterS
 		wrappedErr := errors.Wrapf(err, "error deleting AzureCluster %s/%s", azureCluster.Namespace, azureCluster.Name)
 		acr.Recorder.Eventf(azureCluster, corev1.EventTypeWarning, "ClusterReconcilerDeleteFailed", "%s", wrappedErr.Error())
-		conditions.MarkFalse(azureCluster, infrav1.NetworkInfrastructureReadyCondition, clusterv1.DeletionFailedReason, clusterv1.ConditionSeverityWarning, "%s", err.Error())
+		v1beta1conditions.MarkFalse(azureCluster, infrav1.NetworkInfrastructureReadyCondition, clusterv1beta1.DeletionFailedReason, clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
 		return reconcile.Result{}, wrappedErr
 	}
diff --git a/controllers/azurecluster_controller_test.go b/controllers/azurecluster_controller_test.go
index 468b733dc62..1ca44c03c7f 100644
--- a/controllers/azurecluster_controller_test.go
+++ b/controllers/azurecluster_controller_test.go
@@ -32,7 +32,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/tools/record"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -247,7 +247,7 @@ func TestAzureClusterReconcilePaused(t *testing.T) {
 	ctx := t.Context()
 
 	sb := runtime.NewSchemeBuilder(
-		clusterv1.AddToScheme,
+		clusterv1beta1.AddToScheme,
 		infrav1.AddToScheme,
 		asoresourcesv1.AddToScheme,
 		asonetworkv1.AddToScheme,
@@ -279,12 +279,12 @@ func TestAzureClusterReconcilePaused(t *testing.T) {
 	name := test.RandomName("paused", 10)
 	namespace := namespace
 
-	cluster := &clusterv1.Cluster{
+	cluster := &clusterv1beta1.Cluster{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      name,
 			Namespace: namespace,
 		},
-		Spec: clusterv1.ClusterSpec{
+		Spec: clusterv1beta1.ClusterSpec{
 			Paused: true,
 		},
 	}
@@ -300,7 +300,7 @@ func TestAzureClusterReconcilePaused(t *testing.T) {
 			OwnerReferences: []metav1.OwnerReference{
 				{
 					Kind:       "Cluster",
-					APIVersion: clusterv1.GroupVersion.String(),
+					APIVersion: clusterv1beta1.GroupVersion.String(),
 					Name:       cluster.Name,
 					UID:        cluster.UID,
 				},
diff --git a/controllers/azurecluster_reconciler.go b/controllers/azurecluster_reconciler.go
index 0510f9ab647..e3e9b86b79a 100644
--- a/controllers/azurecluster_reconciler.go
+++ b/controllers/azurecluster_reconciler.go
@@ -20,7 +20,7 @@ import (
 	"context"
 
 	"github.com/pkg/errors"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 
 	"sigs.k8s.io/cluster-api-provider-azure/azure"
 	"sigs.k8s.io/cluster-api-provider-azure/azure/scope"
@@ -199,7 +199,7 @@ func (s *azureClusterService) setFailureDomainsForLocation(ctx context.Context)
 	}
 
 	for _, zone := range zones {
-		s.scope.SetFailureDomain(zone, clusterv1.FailureDomainSpec{
+		s.scope.SetFailureDomain(zone, clusterv1beta1.FailureDomainSpec{
 			ControlPlane: true,
 		})
 	}
diff --git a/controllers/azurecluster_reconciler_test.go b/controllers/azurecluster_reconciler_test.go
index ef71688dc3d..9a7c4999fd2 100644
--- a/controllers/azurecluster_reconciler_test.go
+++ b/controllers/azurecluster_reconciler_test.go
@@ -29,7 +29,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/utils/ptr"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake"
 
@@ -83,7 +83,7 @@ func TestAzureClusterServiceReconcile(t *testing.T) {
 			s := &azureClusterService{
 				scope: &scope.ClusterScope{
-					Cluster:      &clusterv1.Cluster{},
+					Cluster:      &clusterv1beta1.Cluster{},
 					AzureCluster: &infrav1.AzureCluster{},
 				},
 				services: []azure.ServiceReconciler{
@@ -358,7 +358,7 @@ func TestAzureClusterServiceDelete(t *testing.T) {
 						},
 					},
 				},
-				Cluster: &clusterv1.Cluster{
+				Cluster: &clusterv1beta1.Cluster{
 					ObjectMeta: metav1.ObjectMeta{
 						Name:      clusterName,
 						Namespace: namespace,
diff --git a/controllers/azurejson_machine_controller.go b/controllers/azurejson_machine_controller.go
index 7df5345ca6a..39309b4a678 100644
--- a/controllers/azurejson_machine_controller.go
+++ b/controllers/azurejson_machine_controller.go
@@ -28,9 +28,8 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/utils/ptr"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	"sigs.k8s.io/cluster-api/util"
-	"sigs.k8s.io/cluster-api/util/annotations"
 	"sigs.k8s.io/cluster-api/util/predicates"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/builder"
@@ -48,6 +47,7 @@ import (
 	azureutil "sigs.k8s.io/cluster-api-provider-azure/util/azure"
 	"sigs.k8s.io/cluster-api-provider-azure/util/reconciler"
 	"sigs.k8s.io/cluster-api-provider-azure/util/tele"
+	clusterv1beta1util "sigs.k8s.io/cluster-api-provider-azure/util/v1beta1"
 )
 
 // AzureJSONMachineReconciler reconciles Azure json secrets for AzureMachine objects.
@@ -80,10 +80,10 @@ func (r *AzureJSONMachineReconciler) SetupWithManager(ctx context.Context, mgr c
 		// Add a watch on Clusters to requeue when the infraRef is set. This is needed because the infraRef is not initially
 		// set in Clusters created from a ClusterClass.
 		Watches(
-			&clusterv1.Cluster{},
+			&clusterv1beta1.Cluster{},
 			handler.EnqueueRequestsFromMapFunc(azureMachineMapper),
 			builder.WithPredicates(
-				predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), log),
+				predicates.ClusterPausedTransitionsOrInfrastructureProvisioned(mgr.GetScheme(), log),
 				predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), log, r.WatchFilterValue),
 			),
 		).
@@ -96,7 +96,9 @@ type filterUnclonedMachinesPredicate struct {
 }
 
 func (f filterUnclonedMachinesPredicate) Create(e event.CreateEvent) bool {
-	return f.Generic(event.GenericEvent(e))
+	return f.Generic(event.GenericEvent{
+		Object: e.Object,
+	})
 }
 
 func (f filterUnclonedMachinesPredicate) Update(e event.UpdateEvent) bool {
@@ -117,7 +119,7 @@ func (f filterUnclonedMachinesPredicate) Generic(e event.GenericEvent) bool {
 	// or machinedeployment, we already created a secret for the template. All machines
 	// in the machinedeployment will share that one secret.
 	gvk := infrav1.GroupVersion.WithKind("AzureMachineTemplate")
-	isClonedFromTemplate := e.Object.GetAnnotations()[clusterv1.TemplateClonedFromGroupKindAnnotation] == gvk.GroupKind().String()
+	isClonedFromTemplate := e.Object.GetAnnotations()[clusterv1beta1.TemplateClonedFromGroupKindAnnotation] == gvk.GroupKind().String()
 
 	return !isClonedFromTemplate
 }
@@ -148,7 +150,7 @@ func (r *AzureJSONMachineReconciler) Reconcile(ctx context.Context, req ctrl.Req
 	}
 
 	// Fetch the Cluster.
-	cluster, err := util.GetClusterFromMetadata(ctx, r.Client, azureMachine.ObjectMeta)
+	cluster, err := clusterv1beta1util.GetClusterFromMetadata(ctx, r.Client, azureMachine.ObjectMeta)
 	if err != nil {
 		return reconcile.Result{}, err
 	}
@@ -160,7 +162,7 @@ func (r *AzureJSONMachineReconciler) Reconcile(ctx context.Context, req ctrl.Req
 	log = log.WithValues("cluster", cluster.Name)
 
 	// Return early if the object or Cluster is paused.
-	if annotations.IsPaused(cluster, azureMachine) {
+	if clusterv1beta1util.IsPaused(cluster, azureMachine) {
 		log.Info("AzureMachine or linked Cluster is marked as paused. Won't reconcile")
 		return ctrl.Result{}, nil
 	}
diff --git a/controllers/azurejson_machine_controller_test.go b/controllers/azurejson_machine_controller_test.go
index f0f0cb0b2dd..1c4499c12df 100644
--- a/controllers/azurejson_machine_controller_test.go
+++ b/controllers/azurejson_machine_controller_test.go
@@ -26,8 +26,7 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/client-go/tools/record"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 	"sigs.k8s.io/controller-runtime/pkg/event"
@@ -51,7 +50,7 @@ func TestUnclonedMachinesPredicate(t *testing.T) {
 		"uncloned control plane node should return true": {
 			expected: true,
 			labels: map[string]string{
-				clusterv1.MachineControlPlaneLabel: "",
+				clusterv1beta1.MachineControlPlaneLabel: "",
 			},
 			annotations: nil,
 		},
@@ -59,7 +58,7 @@ func TestUnclonedMachinesPredicate(t *testing.T) {
 			expected: false,
 			labels:   nil,
 			annotations: map[string]string{
-				clusterv1.TemplateClonedFromGroupKindAnnotation: infrav1.GroupVersion.WithKind("AzureMachineTemplate").GroupKind().String(),
+				clusterv1beta1.TemplateClonedFromGroupKindAnnotation: infrav1.GroupVersion.WithKind("AzureMachineTemplate").GroupKind().String(),
 			},
 		},
 	}
@@ -90,11 +89,11 @@ func TestAzureJSONMachineReconciler(t *testing.T) {
 		t.Error(err)
 	}
 
-	cluster := &clusterv1.Cluster{
+	cluster := &clusterv1beta1.Cluster{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "my-cluster",
 		},
-		Spec: clusterv1.ClusterSpec{
+		Spec: clusterv1beta1.ClusterSpec{
 			InfrastructureRef: &corev1.ObjectReference{
 				APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
 				Kind:       infrav1.AzureClusterKind,
@@ -139,7 +138,7 @@ func TestAzureJSONMachineReconciler(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "my-machine",
 			Labels: map[string]string{
-				clusterv1.ClusterNameLabel: "my-cluster",
+				clusterv1beta1.ClusterNameLabel: "my-cluster",
 			},
 			OwnerReferences: []metav1.OwnerReference{
 				{
@@ -187,11 +186,11 @@ func TestAzureJSONMachineReconciler(t *testing.T) {
 		},
 		"infra ref is nil": {
 			objects: []runtime.Object{
-				&clusterv1.Cluster{
+				&clusterv1beta1.Cluster{
 					ObjectMeta: metav1.ObjectMeta{
 						Name: "my-cluster",
 					},
-					Spec: clusterv1.ClusterSpec{
+					Spec: clusterv1beta1.ClusterSpec{
 						InfrastructureRef: nil,
 					},
 				},
@@ -204,11 +203,11 @@ func TestAzureJSONMachineReconciler(t *testing.T) {
 		},
 		"infra ref is not an azure cluster": {
 			objects: []runtime.Object{
-				&clusterv1.Cluster{
+				&clusterv1beta1.Cluster{
 					ObjectMeta: metav1.ObjectMeta{
 						Name: "my-cluster",
 					},
-					Spec: clusterv1.ClusterSpec{
+					Spec: clusterv1beta1.ClusterSpec{
 						InfrastructureRef: &corev1.ObjectReference{
 							APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
 							Kind:       "FooCluster",
@@ -259,9 +258,8 @@ func newScheme() (*runtime.Scheme, error) {
 	schemeFn := []func(*runtime.Scheme) error{
 		clientgoscheme.AddToScheme,
 		infrav1.AddToScheme,
-		clusterv1.AddToScheme,
+		clusterv1beta1.AddToScheme,
 		infrav1exp.AddToScheme,
-		expv1.AddToScheme,
 		corev1.AddToScheme,
 	}
 	for _, fn := range schemeFn {
diff --git a/controllers/azurejson_machinepool_controller.go b/controllers/azurejson_machinepool_controller.go
index 3cf418680ba..3dbb19a8bbd 100644
--- a/controllers/azurejson_machinepool_controller.go
+++ b/controllers/azurejson_machinepool_controller.go
@@ -26,7 +26,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/tools/record"
"k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" @@ -43,6 +43,7 @@ import ( azureutil "sigs.k8s.io/cluster-api-provider-azure/util/azure" "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" "sigs.k8s.io/cluster-api-provider-azure/util/tele" + clusterv1beta1util "sigs.k8s.io/cluster-api-provider-azure/util/v1beta1" ) // AzureJSONMachinePoolReconciler reconciles Azure json secrets for AzureMachinePool objects. @@ -74,10 +75,10 @@ func (r *AzureJSONMachinePoolReconciler) SetupWithManager(ctx context.Context, m // Add a watch on Clusters to requeue when the infraRef is set. This is needed because the infraRef is not initially // set in Clusters created from a ClusterClass. Watches( - &clusterv1.Cluster{}, + &clusterv1beta1.Cluster{}, handler.EnqueueRequestsFromMapFunc(azureMachinePoolMapper), builder.WithPredicates( - predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), log), + predicates.ClusterPausedTransitionsOrInfrastructureProvisioned(mgr.GetScheme(), log), predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), log, r.WatchFilterValue), ), ). @@ -124,7 +125,7 @@ func (r *AzureJSONMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl log = log.WithValues("machinePool", machinePool.Name) // Fetch the Cluster. - cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machinePool.ObjectMeta) + cluster, err := clusterv1beta1util.GetClusterFromMetadata(ctx, r.Client, machinePool.ObjectMeta) if err != nil { log.Info("MachinePool is missing cluster label or cluster does not exist") return reconcile.Result{}, nil diff --git a/controllers/azurejson_machinepool_controller_test.go b/controllers/azurejson_machinepool_controller_test.go index 3a694ab08fc..f5adf5d2a00 100644 --- a/controllers/azurejson_machinepool_controller_test.go +++ b/controllers/azurejson_machinepool_controller_test.go @@ -30,8 +30,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -49,11 +48,11 @@ func TestAzureJSONPoolReconciler(t *testing.T) { t.Error(err) } - cluster := &clusterv1.Cluster{ + cluster := &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", }, - Spec: clusterv1.ClusterSpec{ + Spec: clusterv1beta1.ClusterSpec{ InfrastructureRef: &corev1.ObjectReference{ APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", Kind: infrav1.AzureClusterKind, @@ -95,11 +94,11 @@ func TestAzureJSONPoolReconciler(t *testing.T) { }, } - machinePool := &expv1.MachinePool{ + machinePool := &clusterv1beta1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "my-machine-pool", Labels: map[string]string{ - clusterv1.ClusterNameLabel: "my-cluster", + clusterv1beta1.ClusterNameLabel: "my-cluster", }, OwnerReferences: []metav1.OwnerReference{ { @@ -169,11 +168,11 @@ func TestAzureJSONPoolReconciler(t *testing.T) { }, "infra ref is nil": { objects: []runtime.Object{ - &clusterv1.Cluster{ + &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", }, - Spec: clusterv1.ClusterSpec{ + Spec: clusterv1beta1.ClusterSpec{ InfrastructureRef: nil, }, }, @@ -187,11 +186,11 @@ func 
TestAzureJSONPoolReconciler(t *testing.T) { }, "infra ref is not an azure cluster": { objects: []runtime.Object{ - &clusterv1.Cluster{ + &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", }, - Spec: clusterv1.ClusterSpec{ + Spec: clusterv1beta1.ClusterSpec{ InfrastructureRef: &corev1.ObjectReference{ APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", Kind: "FooCluster", @@ -258,11 +257,11 @@ func TestAzureJSONPoolReconcilerUserAssignedIdentities(t *testing.T) { Name: "fake-machine-pool", Namespace: "fake-ns", Labels: map[string]string{ - clusterv1.ClusterNameLabel: "fake-cluster", + clusterv1beta1.ClusterNameLabel: "fake-cluster", }, OwnerReferences: []metav1.OwnerReference{ { - APIVersion: fmt.Sprintf("%s/%s", expv1.GroupVersion.Group, expv1.GroupVersion.Version), + APIVersion: fmt.Sprintf("%s/%s", clusterv1beta1.GroupVersion.Group, clusterv1beta1.GroupVersion.Version), Kind: "MachinePool", Name: "fake-other-machine-pool", Controller: to.Ptr(true), @@ -278,12 +277,12 @@ func TestAzureJSONPoolReconcilerUserAssignedIdentities(t *testing.T) { }, } - cluster := &clusterv1.Cluster{ + cluster := &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "fake-cluster", Namespace: "fake-ns", }, - Spec: clusterv1.ClusterSpec{ + Spec: clusterv1beta1.ClusterSpec{ InfrastructureRef: &corev1.ObjectReference{ Kind: "AzureCluster", Name: "fake-azure-cluster", @@ -292,12 +291,12 @@ func TestAzureJSONPoolReconcilerUserAssignedIdentities(t *testing.T) { }, } - ownerMP := &expv1.MachinePool{ + ownerMP := &clusterv1beta1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "fake-other-machine-pool", Namespace: "fake-ns", Labels: map[string]string{ - clusterv1.ClusterNameLabel: "fake-cluster", + clusterv1beta1.ClusterNameLabel: "fake-cluster", }, }, } diff --git a/controllers/azurejson_machinetemplate_controller.go b/controllers/azurejson_machinetemplate_controller.go index 2c1e4184bec..1bc81dfa0d8 100644 --- a/controllers/azurejson_machinetemplate_controller.go +++ b/controllers/azurejson_machinetemplate_controller.go @@ -27,9 +27,8 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" @@ -45,6 +44,7 @@ import ( azureutil "sigs.k8s.io/cluster-api-provider-azure/util/azure" "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" "sigs.k8s.io/cluster-api-provider-azure/util/tele" + clusterv1beta1util "sigs.k8s.io/cluster-api-provider-azure/util/v1beta1" ) // AzureJSONTemplateReconciler reconciles Azure json secrets for AzureMachineTemplate objects. @@ -76,10 +76,10 @@ func (r *AzureJSONTemplateReconciler) SetupWithManager(ctx context.Context, mgr // Add a watch on Clusters to requeue when the infraRef is set. This is needed because the infraRef is not initially // set in Clusters created from a ClusterClass. Watches( - &clusterv1.Cluster{}, + &clusterv1beta1.Cluster{}, handler.EnqueueRequestsFromMapFunc(azureMachineTemplateMapper), builder.WithPredicates( - predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), log), + predicates.ClusterPausedTransitionsOrInfrastructureProvisioned(mgr.GetScheme(), log), predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), log, r.WatchFilterValue), ), ). 
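Each azurejson controller above wires the same Cluster watch; beyond the alias change, the one functional rename in these hunks is the predicate, ClusterPausedTransitionsOrInfrastructureReady becoming ClusterPausedTransitionsOrInfrastructureProvisioned. A condensed sketch of that shared wiring, assuming a hypothetical helper name (addClusterWatch) and with mapper standing in for the per-controller map functions (azureMachineMapper, azureMachinePoolMapper, azureMachineTemplateMapper):

package controllers

import (
	"github.com/go-logr/logr"
	"k8s.io/apimachinery/pkg/runtime"
	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
	"sigs.k8s.io/cluster-api/util/predicates"
	"sigs.k8s.io/controller-runtime/pkg/builder"
	"sigs.k8s.io/controller-runtime/pkg/handler"
)

// addClusterWatch registers a watch on CAPI Clusters so a controller requeues
// its objects once the Cluster's infraRef is set, which happens after creation
// for Clusters created from a ClusterClass.
func addClusterWatch(b *builder.Builder, scheme *runtime.Scheme, log logr.Logger, watchFilterValue string, mapper handler.MapFunc) *builder.Builder {
	return b.Watches(
		&clusterv1beta1.Cluster{},
		handler.EnqueueRequestsFromMapFunc(mapper),
		builder.WithPredicates(
			// Renamed predicate: fires on pause transitions or once the
			// cluster infrastructure is provisioned (the v1beta2 wording
			// for the old "infrastructure ready").
			predicates.ClusterPausedTransitionsOrInfrastructureProvisioned(scheme, log),
			predicates.ResourceNotPausedAndHasFilterLabel(scheme, log, watchFilterValue),
		),
	)
}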
@@ -110,7 +110,7 @@ func (r *AzureJSONTemplateReconciler) Reconcile(ctx context.Context, req ctrl.Re
 	}
 
 	// Fetch the Cluster.
-	cluster, err := util.GetOwnerCluster(ctx, r.Client, azureMachineTemplate.ObjectMeta)
+	cluster, err := clusterv1beta1util.GetOwnerCluster(ctx, r.Client, azureMachineTemplate.ObjectMeta)
 	if err != nil {
 		return reconcile.Result{}, err
 	}
@@ -122,7 +122,7 @@ func (r *AzureJSONTemplateReconciler) Reconcile(ctx context.Context, req ctrl.Re
 	log = log.WithValues("cluster", cluster.Name)
 
 	// Return early if the object or Cluster is paused.
-	if annotations.IsPaused(cluster, azureMachineTemplate) {
+	if clusterv1beta1util.IsPaused(cluster, azureMachineTemplate) {
 		log.Info("AzureMachineTemplate or linked Cluster is marked as paused. Won't reconcile")
 		return ctrl.Result{}, nil
 	}
diff --git a/controllers/azurejson_machinetemplate_controller_test.go b/controllers/azurejson_machinetemplate_controller_test.go
index 858b0ea9637..6c057398384 100644
--- a/controllers/azurejson_machinetemplate_controller_test.go
+++ b/controllers/azurejson_machinetemplate_controller_test.go
@@ -25,7 +25,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/tools/record"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 
@@ -39,11 +39,11 @@ func TestAzureJSONTemplateReconciler(t *testing.T) {
 		t.Error(err)
 	}
 
-	cluster := &clusterv1.Cluster{
+	cluster := &clusterv1beta1.Cluster{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "my-cluster",
 		},
-		Spec: clusterv1.ClusterSpec{
+		Spec: clusterv1beta1.ClusterSpec{
 			InfrastructureRef: &corev1.ObjectReference{
 				APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
 				Kind:       infrav1.AzureClusterKind,
@@ -124,11 +124,11 @@ func TestAzureJSONTemplateReconciler(t *testing.T) {
 		},
 		"infra ref is nil": {
 			objects: []runtime.Object{
-				&clusterv1.Cluster{
+				&clusterv1beta1.Cluster{
 					ObjectMeta: metav1.ObjectMeta{
 						Name: "my-cluster",
 					},
-					Spec: clusterv1.ClusterSpec{
+					Spec: clusterv1beta1.ClusterSpec{
 						InfrastructureRef: nil,
 					},
 				},
@@ -139,11 +139,11 @@ func TestAzureJSONTemplateReconciler(t *testing.T) {
 		},
 		"infra ref is not an azure cluster": {
 			objects: []runtime.Object{
-				&clusterv1.Cluster{
+				&clusterv1beta1.Cluster{
 					ObjectMeta: metav1.ObjectMeta{
 						Name: "my-cluster",
 					},
-					Spec: clusterv1.ClusterSpec{
+					Spec: clusterv1beta1.ClusterSpec{
 						InfrastructureRef: &corev1.ObjectReference{
 							APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
 							Kind:       "FooCluster",
diff --git a/controllers/azuremachine_controller.go b/controllers/azuremachine_controller.go
index 6466fabf3b2..88024e58b67 100644
--- a/controllers/azuremachine_controller.go
+++ b/controllers/azuremachine_controller.go
@@ -24,10 +24,9 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/client-go/tools/record"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	"sigs.k8s.io/cluster-api/util"
-	"sigs.k8s.io/cluster-api/util/annotations"
-	"sigs.k8s.io/cluster-api/util/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 	"sigs.k8s.io/cluster-api/util/predicates"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/builder"
@@ -42,6 +41,7 @@ import (
 	"sigs.k8s.io/cluster-api-provider-azure/pkg/coalescing"
 	"sigs.k8s.io/cluster-api-provider-azure/util/reconciler"
 	"sigs.k8s.io/cluster-api-provider-azure/util/tele"
+	clusterv1beta1util "sigs.k8s.io/cluster-api-provider-azure/util/v1beta1"
 )
 
 // AzureMachineReconciler reconciles an AzureMachine object.
@@ -101,7 +101,7 @@ func (amr *AzureMachineReconciler) SetupWithManager(ctx context.Context, mgr ctr
 		WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), log, amr.WatchFilterValue)).
 		// watch for changes in CAPI Machine resources
 		Watches(
-			&clusterv1.Machine{},
+			&clusterv1beta1.Machine{},
 			handler.EnqueueRequestsFromMapFunc(util.MachineToInfrastructureMapFunc(infrav1.GroupVersion.WithKind("AzureMachine"))),
 		).
 		// watch for changes in AzureCluster
@@ -109,9 +109,9 @@ func (amr *AzureMachineReconciler) SetupWithManager(ctx context.Context, mgr ctr
 			&infrav1.AzureCluster{},
 			handler.EnqueueRequestsFromMapFunc(azureClusterToAzureMachinesMapper),
 		).
-		// Add a watch on clusterv1.Cluster object for pause/unpause & ready notifications.
+		// Add a watch on clusterv1beta1.Cluster object for pause/unpause & ready notifications.
 		Watches(
-			&clusterv1.Cluster{},
+			&clusterv1beta1.Cluster{},
 			handler.EnqueueRequestsFromMapFunc(azureMachineMapper),
 			builder.WithPredicates(
 				ClusterPauseChangeAndInfrastructureReady(mgr.GetScheme(), log),
@@ -152,7 +152,7 @@ func (amr *AzureMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 	}
 
 	// Fetch the Machine.
-	machine, err := util.GetOwnerMachine(ctx, amr.Client, azureMachine.ObjectMeta)
+	machine, err := clusterv1beta1util.GetOwnerMachine(ctx, amr.Client, azureMachine.ObjectMeta)
 	if err != nil {
 		return reconcile.Result{}, err
 	}
@@ -165,7 +165,7 @@ func (amr *AzureMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 	log = log.WithValues("machine", machine.Name)
 
 	// Fetch the Cluster.
-	cluster, err := util.GetClusterFromMetadata(ctx, amr.Client, machine.ObjectMeta)
+	cluster, err := clusterv1beta1util.GetClusterFromMetadata(ctx, amr.Client, machine.ObjectMeta)
 	if err != nil {
 		amr.Recorder.Eventf(azureMachine, corev1.EventTypeNormal, "Unable to get cluster from metadata", "Machine is missing cluster label or cluster does not exist")
 		log.Info("Machine is missing cluster label or cluster does not exist")
@@ -219,7 +219,7 @@ func (amr *AzureMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 	}()
 
 	// Return early if the object or Cluster is paused.
-	if annotations.IsPaused(cluster, azureMachine) {
+	if clusterv1beta1util.IsPaused(cluster, azureMachine) {
 		log.Info("AzureMachine or linked Cluster is marked as paused. Won't reconcile normally")
 		return amr.reconcilePause(ctx, machineScope)
 	}
@@ -257,14 +257,14 @@ func (amr *AzureMachineReconciler) reconcileNormal(ctx context.Context, machineS
 	// Make sure the Cluster Infrastructure is ready.
 	if !clusterScope.Cluster.Status.InfrastructureReady {
 		log.Info("Cluster infrastructure is not ready yet")
-		conditions.MarkFalse(machineScope.AzureMachine, infrav1.VMRunningCondition, infrav1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "")
+		v1beta1conditions.MarkFalse(machineScope.AzureMachine, infrav1.VMRunningCondition, infrav1.WaitingForClusterInfrastructureReason, clusterv1beta1.ConditionSeverityInfo, "")
 		return reconcile.Result{}, nil
 	}
 
 	// Make sure bootstrap data is available and populated.
if machineScope.Machine.Spec.Bootstrap.DataSecretName == nil { log.Info("Bootstrap data secret reference is not yet available") - conditions.MarkFalse(machineScope.AzureMachine, infrav1.VMRunningCondition, infrav1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(machineScope.AzureMachine, infrav1.VMRunningCondition, infrav1.WaitingForBootstrapDataReason, clusterv1beta1.ConditionSeverityInfo, "") return reconcile.Result{}, nil } @@ -285,7 +285,7 @@ func (amr *AzureMachineReconciler) reconcileNormal(ctx context.Context, machineS } // Mark the AzureMachine as failed if the identities are not ready. - cond := conditions.Get(machineScope.AzureMachine, infrav1.VMIdentitiesReadyCondition) + cond := v1beta1conditions.Get(machineScope.AzureMachine, infrav1.VMIdentitiesReadyCondition) if cond != nil && cond.Status == corev1.ConditionFalse && cond.Reason == infrav1.UserAssignedIdentityMissingReason { amr.Recorder.Eventf(machineScope.AzureMachine, corev1.EventTypeWarning, infrav1.UserAssignedIdentityMissingReason, "VM is unhealthy") machineScope.SetFailureReason(azure.UnsupportedChange) @@ -364,7 +364,7 @@ func (amr *AzureMachineReconciler) reconcileDelete(ctx context.Context, machineS defer done() log.Info("Handling deleted AzureMachine") - conditions.MarkFalse(machineScope.AzureMachine, infrav1.VMRunningCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(machineScope.AzureMachine, infrav1.VMRunningCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := machineScope.PatchObject(ctx); err != nil { return reconcile.Result{}, err } diff --git a/controllers/azuremachine_controller_test.go b/controllers/azuremachine_controller_test.go index e61ed32d6ad..d09a9cbc69a 100644 --- a/controllers/azuremachine_controller_test.go +++ b/controllers/azuremachine_controller_test.go @@ -29,7 +29,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -196,7 +196,7 @@ func TestAzureMachineReconcileNormal(t *testing.T) { }, "should fail if identities are not ready": { azureMachineOptions: func(am *infrav1.AzureMachine) { - am.Status.Conditions = clusterv1.Conditions{ + am.Status.Conditions = clusterv1beta1.Conditions{ { Type: infrav1.VMIdentitiesReadyCondition, Reason: infrav1.UserAssignedIdentityMissingReason, @@ -366,8 +366,8 @@ func getMachineReconcileInputs(tc TestMachineReconcileInput) (*AzureMachineRecon azureCluster := getFakeAzureCluster(func(ac *infrav1.AzureCluster) { ac.Spec.Location = "westus2" }) - machine := getFakeMachine(azureMachine, func(m *clusterv1.Machine) { - m.Spec.Bootstrap = clusterv1.Bootstrap{ + machine := getFakeMachine(azureMachine, func(m *clusterv1beta1.Machine) { + m.Spec.Bootstrap = clusterv1beta1.Bootstrap{ DataSecretName: ptr.To("fooSecret"), } }) @@ -529,20 +529,20 @@ func getDefaultAzureMachineService(machineScope *scope.MachineScope, cache *reso } } -func getFakeCluster() *clusterv1.Cluster { - return &clusterv1.Cluster{ +func getFakeCluster() *clusterv1beta1.Cluster { + return &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", Namespace: "default", }, - Spec: clusterv1.ClusterSpec{ + Spec: clusterv1beta1.ClusterSpec{ InfrastructureRef: 
&corev1.ObjectReference{ APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", Kind: infrav1.AzureClusterKind, Name: "my-azure-cluster", }, }, - Status: clusterv1.ClusterStatus{ + Status: clusterv1beta1.ClusterStatus{ InfrastructureReady: true, }, } @@ -585,7 +585,7 @@ func getFakeAzureCluster(changes ...func(*infrav1.AzureCluster)) *infrav1.AzureC }, }, }, - ControlPlaneEndpoint: clusterv1.APIEndpoint{ + ControlPlaneEndpoint: clusterv1beta1.APIEndpoint{ Port: 6443, }, }, @@ -603,7 +603,7 @@ func getFakeAzureMachine(changes ...func(*infrav1.AzureMachine)) *infrav1.AzureM Name: "my-machine", Namespace: "default", Labels: map[string]string{ - clusterv1.ClusterNameLabel: "my-cluster", + clusterv1beta1.ClusterNameLabel: "my-cluster", }, OwnerReferences: []metav1.OwnerReference{ { @@ -644,20 +644,20 @@ func getFakeAzureClusterIdentity(changes ...func(*infrav1.AzureClusterIdentity)) return input } -func getFakeMachine(azureMachine *infrav1.AzureMachine, changes ...func(*clusterv1.Machine)) *clusterv1.Machine { - input := &clusterv1.Machine{ +func getFakeMachine(azureMachine *infrav1.AzureMachine, changes ...func(*clusterv1beta1.Machine)) *clusterv1beta1.Machine { + input := &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "my-machine", Namespace: "default", Labels: map[string]string{ - clusterv1.ClusterNameLabel: "my-cluster", + clusterv1beta1.ClusterNameLabel: "my-cluster", }, }, TypeMeta: metav1.TypeMeta{ APIVersion: "cluster.x-k8s.io/v1beta1", Kind: "Machine", }, - Spec: clusterv1.MachineSpec{ + Spec: clusterv1beta1.MachineSpec{ InfrastructureRef: corev1.ObjectReference{ APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", Kind: "AzureMachine", @@ -681,20 +681,20 @@ func TestConditions(t *testing.T) { testcases := []struct { name string - clusterStatus clusterv1.ClusterStatus - machine *clusterv1.Machine + clusterStatus clusterv1beta1.ClusterStatus + machine *clusterv1beta1.Machine azureMachine *infrav1.AzureMachine - expectedConditions []clusterv1.Condition + expectedConditions []clusterv1beta1.Condition }{ { name: "cluster infrastructure is not ready yet", - clusterStatus: clusterv1.ClusterStatus{ + clusterStatus: clusterv1beta1.ClusterStatus{ InfrastructureReady: false, }, - machine: &clusterv1.Machine{ + machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - clusterv1.ClusterNameLabel: "my-cluster", + clusterv1beta1.ClusterNameLabel: "my-cluster", }, Name: "my-machine", }, @@ -704,29 +704,29 @@ func TestConditions(t *testing.T) { Name: "azure-test1", OwnerReferences: []metav1.OwnerReference{ { - APIVersion: clusterv1.GroupVersion.String(), + APIVersion: clusterv1beta1.GroupVersion.String(), Kind: "Machine", Name: "test1", }, }, }, }, - expectedConditions: []clusterv1.Condition{{ + expectedConditions: []clusterv1beta1.Condition{{ Type: "VMRunning", Status: corev1.ConditionFalse, - Severity: clusterv1.ConditionSeverityInfo, + Severity: clusterv1beta1.ConditionSeverityInfo, Reason: "WaitingForClusterInfrastructure", }}, }, { name: "bootstrap data secret reference is not yet available", - clusterStatus: clusterv1.ClusterStatus{ + clusterStatus: clusterv1beta1.ClusterStatus{ InfrastructureReady: true, }, - machine: &clusterv1.Machine{ + machine: &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - clusterv1.ClusterNameLabel: "my-cluster", + clusterv1beta1.ClusterNameLabel: "my-cluster", }, Name: "my-machine", }, @@ -736,24 +736,24 @@ func TestConditions(t *testing.T) { Name: "azure-test1", OwnerReferences: 
[]metav1.OwnerReference{ { - APIVersion: clusterv1.GroupVersion.String(), + APIVersion: clusterv1beta1.GroupVersion.String(), Kind: "Machine", Name: "test1", }, }, }, }, - expectedConditions: []clusterv1.Condition{{ + expectedConditions: []clusterv1beta1.Condition{{ Type: "VMRunning", Status: corev1.ConditionFalse, - Severity: clusterv1.ConditionSeverityInfo, + Severity: clusterv1beta1.ConditionSeverityInfo, Reason: "WaitingForBootstrapData", }}, }, } for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - cluster := &clusterv1.Cluster{ + cluster := &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", }, @@ -819,7 +819,7 @@ func TestConditions(t *testing.T) { } } -func conditionsMatch(i, j clusterv1.Condition) bool { +func conditionsMatch(i, j clusterv1beta1.Condition) bool { return i.Type == j.Type && i.Status == j.Status && i.Reason == j.Reason && diff --git a/controllers/azuremachine_reconciler_test.go b/controllers/azuremachine_reconciler_test.go index cd04f95361d..bc49ece89a5 100644 --- a/controllers/azuremachine_reconciler_test.go +++ b/controllers/azuremachine_reconciler_test.go @@ -23,7 +23,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" . "github.com/onsi/gomega" "go.uber.org/mock/gomock" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -75,9 +75,9 @@ func TestAzureMachineServiceReconcile(t *testing.T) { scope: &scope.MachineScope{ ClusterScoper: &scope.ClusterScope{ AzureCluster: &infrav1.AzureCluster{}, - Cluster: &clusterv1.Cluster{}, + Cluster: &clusterv1beta1.Cluster{}, }, - Machine: &clusterv1.Machine{}, + Machine: &clusterv1beta1.Machine{}, AzureMachine: &infrav1.AzureMachine{ Spec: infrav1.AzureMachineSpec{ SubnetName: "test-subnet", @@ -214,9 +214,9 @@ func TestAzureMachineServiceDelete(t *testing.T) { scope: &scope.MachineScope{ ClusterScoper: &scope.ClusterScope{ AzureCluster: &infrav1.AzureCluster{}, - Cluster: &clusterv1.Cluster{}, + Cluster: &clusterv1beta1.Cluster{}, }, - Machine: &clusterv1.Machine{}, + Machine: &clusterv1beta1.Machine{}, AzureMachine: &infrav1.AzureMachine{}, }, services: []azure.ServiceReconciler{ diff --git a/controllers/azuremanagedcluster_controller.go b/controllers/azuremanagedcluster_controller.go index fbc4b584128..1d2494baa27 100644 --- a/controllers/azuremanagedcluster_controller.go +++ b/controllers/azuremanagedcluster_controller.go @@ -23,10 +23,9 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/annotations" - "sigs.k8s.io/cluster-api/util/patch" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" @@ -38,6 +37,7 @@ import ( "sigs.k8s.io/cluster-api-provider-azure/pkg/coalescing" "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" "sigs.k8s.io/cluster-api-provider-azure/util/tele" + clusterv1beta1util "sigs.k8s.io/cluster-api-provider-azure/util/v1beta1" ) // AzureManagedClusterReconciler reconciles an AzureManagedCluster object. 
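The import churn in these controller hunks all traces to one upstream move: in Cluster API v1.11 the core types relocated from sigs.k8s.io/cluster-api/api/v1beta1 to sigs.k8s.io/cluster-api/api/core/v1beta1, and the condition and patch helpers that still operate on those types now ship under sigs.k8s.io/cluster-api/util/deprecated/v1beta1, hence the clusterv1beta1, v1beta1conditions, and v1beta1patch aliases. CAPZ's own sigs.k8s.io/cluster-api-provider-azure/util/v1beta1 package (aliased clusterv1beta1util) fills the same role for helpers such as GetOwnerCluster and IsPaused. A minimal sketch of the post-migration call pattern; markWaiting is a hypothetical helper, not part of this PR:

```go
package example

import (
	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
)

// markWaiting records a False condition using the relocated packages: the
// Setter interface and MarkFalse live in util/deprecated/v1beta1/conditions,
// while ConditionType and the severity constants live in api/core/v1beta1.
func markWaiting(obj v1beta1conditions.Setter, t clusterv1beta1.ConditionType, reason string) {
	v1beta1conditions.MarkFalse(obj, t, reason, clusterv1beta1.ConditionSeverityInfo, "")
}
```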
@@ -77,9 +77,9 @@ func (amcr *AzureManagedClusterReconciler) SetupWithManager(ctx context.Context, &infrav1.AzureManagedControlPlane{}, handler.EnqueueRequestsFromMapFunc(azureManagedControlPlaneMapper), ). - // Add a watch on clusterv1.Cluster object for unpause notifications. + // Add a watch on clusterv1beta1.Cluster object for unpause notifications. Watches( - &clusterv1.Cluster{}, + &clusterv1beta1.Cluster{}, handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, infrav1.GroupVersion.WithKind(infrav1.AzureManagedClusterKind), mgr.GetClient(), &infrav1.AzureManagedCluster{})), builder.WithPredicates( predicates.ClusterUnpaused(mgr.GetScheme(), log), @@ -119,7 +119,7 @@ func (amcr *AzureManagedClusterReconciler) Reconcile(ctx context.Context, req ct } // Fetch the Cluster. - cluster, err := util.GetOwnerCluster(ctx, amcr.Client, aksCluster.ObjectMeta) + cluster, err := clusterv1beta1util.GetOwnerCluster(ctx, amcr.Client, aksCluster.ObjectMeta) if err != nil { return reconcile.Result{}, err } @@ -137,7 +137,7 @@ func (amcr *AzureManagedClusterReconciler) Reconcile(ctx context.Context, req ct log = log.WithValues("cluster", cluster.Name) // Return early if the object or Cluster is paused. - if annotations.IsPaused(cluster, aksCluster) { + if clusterv1beta1util.IsPaused(cluster, aksCluster) { log.Info("AzureManagedCluster or linked Cluster is marked as paused. Won't reconcile") return ctrl.Result{}, nil } @@ -148,7 +148,7 @@ func (amcr *AzureManagedClusterReconciler) Reconcile(ctx context.Context, req ct log = log.WithValues("controlPlane", controlPlaneRef.Name) - patchhelper, err := patch.NewHelper(aksCluster, amcr.Client) + patchhelper, err := v1beta1patch.NewHelper(aksCluster, amcr.Client) if err != nil { return reconcile.Result{}, errors.Wrap(err, "failed to init patch helper") } diff --git a/controllers/azuremanagedcluster_controller_test.go b/controllers/azuremanagedcluster_controller_test.go index fc20c79691a..8366bca8f8e 100644 --- a/controllers/azuremanagedcluster_controller_test.go +++ b/controllers/azuremanagedcluster_controller_test.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -39,14 +39,14 @@ func TestAzureManagedClusterController(t *testing.T) { ctx := t.Context() scheme := runtime.NewScheme() g.Expect(infrav1.AddToScheme(scheme)).To(Succeed()) - g.Expect(clusterv1.AddToScheme(scheme)).To(Succeed()) + g.Expect(clusterv1beta1.AddToScheme(scheme)).To(Succeed()) - cluster := &clusterv1.Cluster{ + cluster := &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "fake-capi-cluster", Namespace: "fake-namespace", }, - Spec: clusterv1.ClusterSpec{ + Spec: clusterv1beta1.ClusterSpec{ ControlPlaneRef: &corev1.ObjectReference{ Name: "fake-control-plane", }, @@ -58,7 +58,7 @@ func TestAzureManagedClusterController(t *testing.T) { Namespace: "fake-namespace", }, Spec: infrav1.AzureManagedControlPlaneSpec{ - ControlPlaneEndpoint: clusterv1.APIEndpoint{ + ControlPlaneEndpoint: clusterv1beta1.APIEndpoint{ Host: "fake-host", Port: int32(8080), }, diff --git a/controllers/azuremanagedcontrolplane_controller.go b/controllers/azuremanagedcontrolplane_controller.go index b2982f7636d..a1502e5b7c4 100644 --- 
a/controllers/azuremanagedcontrolplane_controller.go +++ b/controllers/azuremanagedcontrolplane_controller.go @@ -24,11 +24,7 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/client-go/tools/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" - capiexputil "sigs.k8s.io/cluster-api/exp/util" - "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/annotations" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" @@ -43,6 +39,7 @@ import ( "sigs.k8s.io/cluster-api-provider-azure/pkg/coalescing" "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" "sigs.k8s.io/cluster-api-provider-azure/util/tele" + clusterv1beta1util "sigs.k8s.io/cluster-api-provider-azure/util/v1beta1" ) // AzureManagedControlPlaneReconciler reconciles an AzureManagedControlPlane object. @@ -90,12 +87,12 @@ func (amcpr *AzureManagedControlPlaneReconciler) SetupWithManager(ctx context.Co ). // watch MachinePool resources Watches( - &expv1.MachinePool{}, + &clusterv1beta1.MachinePool{}, handler.EnqueueRequestsFromMapFunc(azureManagedMachinePoolMapper), ). - // Add a watch on clusterv1.Cluster object for pause/unpause & ready notifications. + // Add a watch on clusterv1beta1.Cluster object for pause/unpause & ready notifications. Watches( - &clusterv1.Cluster{}, + &clusterv1beta1.Cluster{}, handler.EnqueueRequestsFromMapFunc(amcpr.ClusterToAzureManagedControlPlane), builder.WithPredicates( ClusterPauseChangeAndInfrastructureReady(mgr.GetScheme(), log), @@ -142,7 +139,7 @@ func (amcpr *AzureManagedControlPlaneReconciler) Reconcile(ctx context.Context, } // Fetch the Cluster. - cluster, err := util.GetOwnerCluster(ctx, amcpr.Client, azureControlPlane.ObjectMeta) + cluster, err := clusterv1beta1util.GetOwnerCluster(ctx, amcpr.Client, azureControlPlane.ObjectMeta) if err != nil { return reconcile.Result{}, err } @@ -156,7 +153,7 @@ func (amcpr *AzureManagedControlPlaneReconciler) Reconcile(ctx context.Context, // Fetch all the ManagedMachinePools owned by this Cluster. opt1 := client.InNamespace(azureControlPlane.Namespace) opt2 := client.MatchingLabels(map[string]string{ - clusterv1.ClusterNameLabel: cluster.Name, + clusterv1beta1.ClusterNameLabel: cluster.Name, }) ammpList := &infrav1.AzureManagedMachinePoolList{} @@ -168,7 +165,7 @@ func (amcpr *AzureManagedControlPlaneReconciler) Reconcile(ctx context.Context, for i, ammp := range ammpList.Items { // Fetch the owner MachinePool. - ownerPool, err := capiexputil.GetOwnerMachinePool(ctx, amcpr.Client, ammp.ObjectMeta) + ownerPool, err := clusterv1beta1util.GetOwnerMachinePool(ctx, amcpr.Client, ammp.ObjectMeta) if err != nil || ownerPool == nil { return reconcile.Result{}, errors.Wrapf(err, "failed to fetch owner MachinePool for AzureManagedMachinePool: %s", ammp.Name) } @@ -199,7 +196,7 @@ func (amcpr *AzureManagedControlPlaneReconciler) Reconcile(ctx context.Context, }() // Return early if the object or Cluster is paused. - if annotations.IsPaused(cluster, azureControlPlane) { + if clusterv1beta1util.IsPaused(cluster, azureControlPlane) { log.Info("AzureManagedControlPlane or linked Cluster is marked as paused. 
Won't reconcile normally") return amcpr.reconcilePause(ctx, mcpScope) } @@ -338,7 +335,7 @@ func (amcpr *AzureManagedControlPlaneReconciler) reconcileDelete(ctx context.Con // ClusterToAzureManagedControlPlane is a handler.ToRequestsFunc to be used to enqueue requests for // reconciliation for AzureManagedControlPlane based on updates to a Cluster. func (amcpr *AzureManagedControlPlaneReconciler) ClusterToAzureManagedControlPlane(_ context.Context, o client.Object) []ctrl.Request { - c, ok := o.(*clusterv1.Cluster) + c, ok := o.(*clusterv1beta1.Cluster) if !ok { panic(fmt.Sprintf("Expected a Cluster but got a %T", o)) } diff --git a/controllers/azuremanagedcontrolplane_controller_test.go b/controllers/azuremanagedcontrolplane_controller_test.go index f13c3431601..1a5b84980fa 100644 --- a/controllers/azuremanagedcontrolplane_controller_test.go +++ b/controllers/azuremanagedcontrolplane_controller_test.go @@ -30,8 +30,8 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/patch" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -83,8 +83,8 @@ func TestClusterToAzureManagedControlPlane(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { g := NewWithT(t) - actual := (&AzureManagedControlPlaneReconciler{}).ClusterToAzureManagedControlPlane(t.Context(), &clusterv1.Cluster{ - Spec: clusterv1.ClusterSpec{ + actual := (&AzureManagedControlPlaneReconciler{}).ClusterToAzureManagedControlPlane(t.Context(), &clusterv1beta1.Cluster{ + Spec: clusterv1beta1.ClusterSpec{ ControlPlaneRef: test.controlPlaneRef, }, }) @@ -103,7 +103,7 @@ func TestAzureManagedControlPlaneReconcilePaused(t *testing.T) { ctx := t.Context() sb := runtime.NewSchemeBuilder( - clusterv1.AddToScheme, + clusterv1beta1.AddToScheme, infrav1.AddToScheme, asoresourcesv1.AddToScheme, asocontainerservicev1.AddToScheme, @@ -130,12 +130,12 @@ func TestAzureManagedControlPlaneReconcilePaused(t *testing.T) { name := test.RandomName("paused", 10) namespace := "default" - cluster := &clusterv1.Cluster{ + cluster := &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, }, - Spec: clusterv1.ClusterSpec{ + Spec: clusterv1beta1.ClusterSpec{ Paused: true, }, } @@ -174,7 +174,7 @@ func TestAzureManagedControlPlaneReconcilePaused(t *testing.T) { OwnerReferences: []metav1.OwnerReference{ { Kind: "Cluster", - APIVersion: clusterv1.GroupVersion.String(), + APIVersion: clusterv1beta1.GroupVersion.String(), Name: cluster.Name, }, }, @@ -283,11 +283,11 @@ func TestAzureManagedControlPlaneReconcileNormal(t *testing.T) { Client: client, } - helper, err := patch.NewHelper(cp, client) + helper, err := v1beta1patch.NewHelper(cp, client) g.Expect(err).NotTo(HaveOccurred()) scopes := &scope.ManagedControlPlaneScope{ - Cluster: &clusterv1.Cluster{ + Cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "fake-cluster", Namespace: "fake-ns", diff --git a/controllers/azuremanagedcontrolplane_reconciler.go b/controllers/azuremanagedcontrolplane_reconciler.go index a6367532096..fcf5333016e 100644 --- a/controllers/azuremanagedcontrolplane_reconciler.go +++ b/controllers/azuremanagedcontrolplane_reconciler.go @@ -22,7 +22,7 @@ import ( 
"github.com/pkg/errors" "k8s.io/client-go/tools/clientcmd" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/secret" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -142,11 +142,11 @@ func (r *azureManagedControlPlaneService) reconcileKubeconfig(ctx context.Contex // When upgrading from an older version of CAPI, the kubeconfig secret may not have the required // cluster name label. Add it here to avoid kubeconfig issues during upgrades. - if _, ok := kubeConfigSecret.Labels[clusterv1.ClusterNameLabel]; !ok { + if _, ok := kubeConfigSecret.Labels[clusterv1beta1.ClusterNameLabel]; !ok { if kubeConfigSecret.Labels == nil { kubeConfigSecret.Labels = make(map[string]string) } - kubeConfigSecret.Labels[clusterv1.ClusterNameLabel] = r.scope.ClusterName() + kubeConfigSecret.Labels[clusterv1beta1.ClusterNameLabel] = r.scope.ClusterName() } return nil }); err != nil { diff --git a/controllers/azuremanagedmachinepool_controller.go b/controllers/azuremanagedmachinepool_controller.go index 9be7f1d95bb..3e409b69c46 100644 --- a/controllers/azuremanagedmachinepool_controller.go +++ b/controllers/azuremanagedmachinepool_controller.go @@ -24,11 +24,9 @@ import ( "github.com/pkg/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/client-go/tools/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/annotations" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" @@ -43,6 +41,7 @@ import ( "sigs.k8s.io/cluster-api-provider-azure/pkg/coalescing" "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" "sigs.k8s.io/cluster-api-provider-azure/util/tele" + clusterv1beta1util "sigs.k8s.io/cluster-api-provider-azure/util/v1beta1" ) // AzureManagedMachinePoolReconciler reconciles an AzureManagedMachinePool object. @@ -103,7 +102,7 @@ func (ammpr *AzureManagedMachinePoolReconciler) SetupWithManager(ctx context.Con WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), log, ammpr.WatchFilterValue)). // watch for changes in CAPI MachinePool resources Watches( - &expv1.MachinePool{}, + &clusterv1beta1.MachinePool{}, handler.EnqueueRequestsFromMapFunc(MachinePoolToInfrastructureMapFunc(infrav1.GroupVersion.WithKind("AzureManagedMachinePool"), log)), ). // watch for changes in AzureManagedControlPlanes @@ -111,9 +110,9 @@ func (ammpr *AzureManagedMachinePoolReconciler) SetupWithManager(ctx context.Con &infrav1.AzureManagedControlPlane{}, handler.EnqueueRequestsFromMapFunc(azureManagedControlPlaneMapper), ). - // Add a watch on clusterv1.Cluster object for pause/unpause & ready notifications. + // Add a watch on clusterv1beta1.Cluster object for pause/unpause & ready notifications. Watches( - &clusterv1.Cluster{}, + &clusterv1beta1.Cluster{}, handler.EnqueueRequestsFromMapFunc(azureManagedMachinePoolMapper), builder.WithPredicates( ClusterPauseChangeAndInfrastructureReady(mgr.GetScheme(), log), @@ -163,7 +162,7 @@ func (ammpr *AzureManagedMachinePoolReconciler) Reconcile(ctx context.Context, r } // Fetch the Cluster. 
- ownerCluster, err := util.GetOwnerCluster(ctx, ammpr.Client, ownerPool.ObjectMeta) + ownerCluster, err := clusterv1beta1util.GetOwnerCluster(ctx, ammpr.Client, ownerPool.ObjectMeta) if err != nil { return reconcile.Result{}, err } @@ -230,7 +229,7 @@ func (ammpr *AzureManagedMachinePoolReconciler) Reconcile(ctx context.Context, r }() // Return early if the object or Cluster is paused. - if annotations.IsPaused(ownerCluster, infraPool) { + if clusterv1beta1util.IsPaused(ownerCluster, infraPool) { log.Info("AzureManagedMachinePool or linked Cluster is marked as paused. Won't reconcile normally") return ammpr.reconcilePause(ctx, mcpScope) } @@ -269,8 +268,8 @@ func (ammpr *AzureManagedMachinePoolReconciler) reconcileNormal(ctx context.Cont scope.SetAgentPoolReady(false) // Ensure the ready condition is false, but do not overwrite an existing // error condition which might provide more details. - if conditions.IsTrue(scope.InfraMachinePool, infrav1.AgentPoolsReadyCondition) { - conditions.MarkFalse(scope.InfraMachinePool, infrav1.AgentPoolsReadyCondition, infrav1.FailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + if v1beta1conditions.IsTrue(scope.InfraMachinePool, infrav1.AgentPoolsReadyCondition) { + v1beta1conditions.MarkFalse(scope.InfraMachinePool, infrav1.AgentPoolsReadyCondition, infrav1.FailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) } // Handle transient and terminal errors diff --git a/controllers/azuremanagedmachinepool_controller_test.go b/controllers/azuremanagedmachinepool_controller_test.go index c41a4181c86..0a03e2b7247 100644 --- a/controllers/azuremanagedmachinepool_controller_test.go +++ b/controllers/azuremanagedmachinepool_controller_test.go @@ -30,8 +30,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -156,8 +155,7 @@ func TestAzureManagedMachinePoolReconcile(t *testing.T) { s := runtime.NewScheme() for _, addTo := range []func(s *runtime.Scheme) error{ scheme.AddToScheme, - clusterv1.AddToScheme, - expv1.AddToScheme, + clusterv1beta1.AddToScheme, infrav1.AddToScheme, corev1.AddToScheme, } { @@ -195,7 +193,7 @@ func TestAzureManagedMachinePoolReconcile(t *testing.T) { } } -func newReadyAzureManagedMachinePoolCluster() (*clusterv1.Cluster, *infrav1.AzureManagedCluster, *infrav1.AzureManagedControlPlane, *infrav1.AzureManagedMachinePool, *expv1.MachinePool) { +func newReadyAzureManagedMachinePoolCluster() (*clusterv1beta1.Cluster, *infrav1.AzureManagedCluster, *infrav1.AzureManagedControlPlane, *infrav1.AzureManagedMachinePool, *clusterv1beta1.MachinePool) { // AzureManagedCluster azManagedCluster := &infrav1.AzureManagedCluster{ ObjectMeta: metav1.ObjectMeta{ @@ -210,7 +208,7 @@ func newReadyAzureManagedMachinePoolCluster() (*clusterv1.Cluster, *infrav1.Azur }, }, Spec: infrav1.AzureManagedClusterSpec{ - ControlPlaneEndpoint: clusterv1.APIEndpoint{ + ControlPlaneEndpoint: clusterv1beta1.APIEndpoint{ Host: "foo.bar", Port: 123, }, @@ -230,7 +228,7 @@ func newReadyAzureManagedMachinePoolCluster() (*clusterv1.Cluster, *infrav1.Azur }, }, Spec: infrav1.AzureManagedControlPlaneSpec{ - ControlPlaneEndpoint: clusterv1.APIEndpoint{ + ControlPlaneEndpoint: clusterv1beta1.APIEndpoint{ Host: "foo.bar", Port: 123, }, @@ -248,12 +246,12 @@ func 
newReadyAzureManagedMachinePoolCluster() (*clusterv1.Cluster, *infrav1.Azur }, } // Cluster - cluster := &clusterv1.Cluster{ + cluster := &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "foo-cluster", Namespace: "foobar", }, - Spec: clusterv1.ClusterSpec{ + Spec: clusterv1beta1.ClusterSpec{ ControlPlaneRef: &corev1.ObjectReference{ APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", Kind: infrav1.AzureManagedControlPlaneKind, @@ -284,7 +282,7 @@ func newReadyAzureManagedMachinePoolCluster() (*clusterv1.Cluster, *infrav1.Azur }, } // MachinePool - mp := &expv1.MachinePool{ + mp := &clusterv1beta1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "foo-mp1", Namespace: "foobar", @@ -299,9 +297,9 @@ func newReadyAzureManagedMachinePoolCluster() (*clusterv1.Cluster, *infrav1.Azur }, }, }, - Spec: expv1.MachinePoolSpec{ - Template: clusterv1.MachineTemplateSpec{ - Spec: clusterv1.MachineSpec{ + Spec: clusterv1beta1.MachinePoolSpec{ + Template: clusterv1beta1.MachineTemplateSpec{ + Spec: clusterv1beta1.MachineSpec{ ClusterName: cluster.Name, InfrastructureRef: corev1.ObjectReference{ APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", diff --git a/controllers/helpers.go b/controllers/helpers.go index bfdadf3b0f8..09c00fe84ff 100644 --- a/controllers/helpers.go +++ b/controllers/helpers.go @@ -34,13 +34,12 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" capifeature "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" - "sigs.k8s.io/cluster-api/util/patch" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -61,6 +60,7 @@ import ( "sigs.k8s.io/cluster-api-provider-azure/pkg/coalescing" "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" "sigs.k8s.io/cluster-api-provider-azure/util/tele" + clusterv1beta1util "sigs.k8s.io/cluster-api-provider-azure/util/v1beta1" ) const ( @@ -119,10 +119,10 @@ func AzureClusterToAzureMachinesMapper(_ context.Context, c client.Client, obj r return nil } - machineList := &clusterv1.MachineList{} + machineList := &clusterv1beta1.MachineList{} machineList.SetGroupVersionKind(gvk) // list all of the requested objects within the cluster namespace with the cluster name label - if err := c.List(ctx, machineList, client.InNamespace(azCluster.Namespace), client.MatchingLabels{clusterv1.ClusterNameLabel: clusterName}); err != nil { + if err := c.List(ctx, machineList, client.InNamespace(azCluster.Namespace), client.MatchingLabels{clusterv1beta1.ClusterNameLabel: clusterName}); err != nil { return nil } @@ -138,7 +138,7 @@ func AzureClusterToAzureMachinesMapper(_ context.Context, c client.Client, obj r }, nil } -// GetOwnerClusterName returns the name of the owning Cluster by finding a clusterv1.Cluster in the ownership references. +// GetOwnerClusterName returns the name of the owning Cluster by finding a clusterv1beta1.Cluster in the ownership references. 
func GetOwnerClusterName(obj metav1.ObjectMeta) (string, bool) { for _, ref := range obj.OwnerReferences { if ref.Kind != "Cluster" { @@ -148,7 +148,7 @@ func GetOwnerClusterName(obj metav1.ObjectMeta) (string, bool) { if err != nil { return "", false } - if gv.Group == clusterv1.GroupVersion.Group { + if gv.Group == clusterv1beta1.GroupVersion.Group { return ref.Name, true } } @@ -158,7 +158,7 @@ func GetOwnerClusterName(obj metav1.ObjectMeta) (string, bool) { // GetObjectsToRequestsByNamespaceAndClusterName returns the slice of ctrl.Requests consisting the list items contained in the unstructured list. func GetObjectsToRequestsByNamespaceAndClusterName(ctx context.Context, c client.Client, clusterKey client.ObjectKey, list *unstructured.UnstructuredList) []ctrl.Request { // list all of the requested objects within the cluster namespace with the cluster name label - if err := c.List(ctx, list, client.InNamespace(clusterKey.Namespace), client.MatchingLabels{clusterv1.ClusterNameLabel: clusterKey.Name}); err != nil { + if err := c.List(ctx, list, client.InNamespace(clusterKey.Namespace), client.MatchingLabels{clusterv1beta1.ClusterNameLabel: clusterKey.Name}); err != nil { return nil } @@ -537,7 +537,7 @@ func reconcileAzureSecret(ctx context.Context, kubeclient client.Client, owner m } // GetOwnerMachinePool returns the MachinePool object owning the current resource. -func GetOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*expv1.MachinePool, error) { +func GetOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1beta1.MachinePool, error) { ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.GetOwnerMachinePool") defer done() @@ -550,7 +550,7 @@ func GetOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.Object return nil, errors.WithStack(err) } - if gv.Group == expv1.GroupVersion.Group { + if gv.Group == clusterv1beta1.GroupVersion.Group { return GetMachinePoolByName(ctx, c, obj.Namespace, ref.Name) } } @@ -580,11 +580,11 @@ func GetOwnerAzureMachinePool(ctx context.Context, c client.Client, obj metav1.O } // GetMachinePoolByName finds and return a MachinePool object using the specified params. -func GetMachinePoolByName(ctx context.Context, c client.Client, namespace, name string) (*expv1.MachinePool, error) { +func GetMachinePoolByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1beta1.MachinePool, error) { ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.GetMachinePoolByName") defer done() - m := &expv1.MachinePool{} + m := &clusterv1beta1.MachinePool{} key := client.ObjectKey{Name: name, Namespace: namespace} if err := c.Get(ctx, key, m); err != nil { return nil, err @@ -654,7 +654,7 @@ func clusterIdentityFinalizer(prefix, clusterNamespace, clusterName string) stri } // EnsureClusterIdentity ensures that the identity ref is allowed in the namespace and sets a finalizer. 
-func EnsureClusterIdentity(ctx context.Context, c client.Client, object conditions.Setter, identityRef *corev1.ObjectReference, finalizerPrefix string) error { +func EnsureClusterIdentity(ctx context.Context, c client.Client, object v1beta1conditions.Setter, identityRef *corev1.ObjectReference, finalizerPrefix string) error { name := object.GetName() namespace := object.GetNamespace() identity, err := GetClusterIdentityFromRef(ctx, c, namespace, identityRef) @@ -663,7 +663,7 @@ func EnsureClusterIdentity(ctx context.Context, c client.Client, object conditio } if !scope.IsClusterNamespaceAllowed(ctx, c, identity.Spec.AllowedNamespaces, namespace) { - conditions.MarkFalse(object, infrav1.NetworkInfrastructureReadyCondition, infrav1.NamespaceNotAllowedByIdentity, clusterv1.ConditionSeverityError, "") + v1beta1conditions.MarkFalse(object, infrav1.NetworkInfrastructureReadyCondition, infrav1.NamespaceNotAllowedByIdentity, clusterv1beta1.ConditionSeverityError, "") return errors.New("AzureClusterIdentity list of allowed namespaces doesn't include current cluster namespace") } @@ -672,7 +672,7 @@ func EnsureClusterIdentity(ctx context.Context, c client.Client, object conditio needsPatch = controllerutil.AddFinalizer(identity, clusterIdentityFinalizer(finalizerPrefix, namespace, name)) || needsPatch if needsPatch { // finalizers are added/removed then patch the object - identityHelper, err := patch.NewHelper(identity, c) + identityHelper, err := v1beta1patch.NewHelper(identity, c) if err != nil { return errors.Wrap(err, "failed to init patch helper") } @@ -690,7 +690,7 @@ func RemoveClusterIdentityFinalizer(ctx context.Context, c client.Client, object if err != nil { return err } - identityHelper, err := patch.NewHelper(identity, c) + identityHelper, err := v1beta1patch.NewHelper(identity, c) if err != nil { return errors.Wrap(err, "failed to init patch helper") } @@ -706,7 +706,7 @@ func RemoveClusterIdentityFinalizer(ctx context.Context, c client.Client, object // MachinePool events and returns reconciliation requests for an infrastructure provider object. 
func MachinePoolToInfrastructureMapFunc(gvk schema.GroupVersionKind, log logr.Logger) handler.MapFunc { return func(_ context.Context, o client.Object) []reconcile.Request { - m, ok := o.(*expv1.MachinePool) + m, ok := o.(*clusterv1beta1.MachinePool) if !ok { log.V(4).Info("attempt to map incorrect type", "type", fmt.Sprintf("%T", o)) return nil @@ -766,10 +766,10 @@ func AzureManagedClusterToAzureManagedMachinePoolsMapper(_ context.Context, c cl return nil } - machineList := &expv1.MachinePoolList{} + machineList := &clusterv1beta1.MachinePoolList{} machineList.SetGroupVersionKind(gvk) // list all of the requested objects within the cluster namespace with the cluster name label - if err := c.List(ctx, machineList, client.InNamespace(azCluster.Namespace), client.MatchingLabels{clusterv1.ClusterNameLabel: clusterName}); err != nil { + if err := c.List(ctx, machineList, client.InNamespace(azCluster.Namespace), client.MatchingLabels{clusterv1beta1.ClusterNameLabel: clusterName}); err != nil { return nil } @@ -819,10 +819,10 @@ func AzureManagedControlPlaneToAzureManagedMachinePoolsMapper(_ context.Context, return nil } - machineList := &expv1.MachinePoolList{} + machineList := &clusterv1beta1.MachinePoolList{} machineList.SetGroupVersionKind(gvk) // list all of the requested objects within the cluster namespace with the cluster name label - if err := c.List(ctx, machineList, client.InNamespace(azControlPlane.Namespace), client.MatchingLabels{clusterv1.ClusterNameLabel: clusterName}); err != nil { + if err := c.List(ctx, machineList, client.InNamespace(azControlPlane.Namespace), client.MatchingLabels{clusterv1beta1.ClusterNameLabel: clusterName}); err != nil { return nil } @@ -860,7 +860,7 @@ func AzureManagedClusterToAzureManagedControlPlaneMapper(_ context.Context, c cl return nil } - cluster, err := util.GetOwnerCluster(ctx, c, azCluster.ObjectMeta) + cluster, err := clusterv1beta1util.GetOwnerCluster(ctx, c, azCluster.ObjectMeta) if err != nil { log.Error(err, "failed to get the owning cluster") return nil @@ -909,7 +909,7 @@ func AzureManagedControlPlaneToAzureManagedClusterMapper(_ context.Context, c cl return nil } - cluster, err := util.GetOwnerCluster(ctx, c, azManagedControlPlane.ObjectMeta) + cluster, err := clusterv1beta1util.GetOwnerCluster(ctx, c, azManagedControlPlane.ObjectMeta) if err != nil { log.Error(err, "failed to get the owning cluster") return nil @@ -943,13 +943,13 @@ func MachinePoolToAzureManagedControlPlaneMapFunc(_ context.Context, c client.Cl ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultMappingTimeout) defer cancel() - machinePool, ok := o.(*expv1.MachinePool) + machinePool, ok := o.(*clusterv1beta1.MachinePool) if !ok { log.Info("expected a MachinePool, got wrong type", "type", fmt.Sprintf("%T", o)) return nil } - cluster, err := util.GetClusterByName(ctx, c, machinePool.ObjectMeta.Namespace, machinePool.Spec.ClusterName) + cluster, err := clusterv1beta1util.GetClusterByName(ctx, c, machinePool.ObjectMeta.Namespace, machinePool.Spec.ClusterName) if err != nil { log.Error(err, "failed to get the owning cluster") return nil @@ -1048,14 +1048,14 @@ func ClusterUpdatePauseChange(logger logr.Logger) predicate.Funcs { UpdateFunc: func(e event.UpdateEvent) bool { log := logger.WithValues("predicate", "ClusterUpdatePauseChange", "eventType", "update") - oldCluster, ok := e.ObjectOld.(*clusterv1.Cluster) + oldCluster, ok := e.ObjectOld.(*clusterv1beta1.Cluster) if !ok { log.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.ObjectOld)) return false } 
log = log.WithValues("Cluster", klog.KObj(oldCluster)) - newCluster := e.ObjectNew.(*clusterv1.Cluster) + newCluster := e.ObjectNew.(*clusterv1beta1.Cluster) if oldCluster.Spec.Paused != newCluster.Spec.Paused { log.V(4).Info("Cluster paused status changed, allowing further processing") @@ -1071,14 +1071,14 @@ func ClusterUpdatePauseChange(logger logr.Logger) predicate.Funcs { } } -// ClusterPauseChangeAndInfrastructureReady is based on ClusterUnpausedAndInfrastructureReady, but +// ClusterPauseChangeAndInfrastructureReady is based on ClusterUnpausedAndInfrastructureProvisioned, but // additionally accepts Cluster pause events. func ClusterPauseChangeAndInfrastructureReady(scheme *runtime.Scheme, log logr.Logger) predicate.Funcs { - return predicates.Any(scheme, log, predicates.ClusterCreateInfraReady(scheme, log), predicates.ClusterUpdateInfraReady(scheme, log), ClusterUpdatePauseChange(log)) //nolint:staticcheck + return predicates.Any(scheme, log, predicates.ClusterCreateInfraProvisioned(scheme, log), predicates.ClusterUpdateInfraProvisioned(scheme, log), ClusterUpdatePauseChange(log)) //nolint:staticcheck } // GetClusterScoper returns a ClusterScoper for the given cluster using the infra ref pointing to either an AzureCluster or an AzureManagedCluster. -func GetClusterScoper(ctx context.Context, logger logr.Logger, c client.Client, cluster *clusterv1.Cluster, timeouts reconciler.Timeouts, credCache azure.CredentialCache) (ClusterScoper, error) { +func GetClusterScoper(ctx context.Context, logger logr.Logger, c client.Client, cluster *clusterv1beta1.Cluster, timeouts reconciler.Timeouts, credCache azure.CredentialCache) (ClusterScoper, error) { infraRef := cluster.Spec.InfrastructureRef switch infraRef.Kind { case "AzureCluster": diff --git a/controllers/helpers_test.go b/controllers/helpers_test.go index 0e37e74f21c..0d0a63cf305 100644 --- a/controllers/helpers_test.go +++ b/controllers/helpers_test.go @@ -34,9 +34,8 @@ import ( "k8s.io/apimachinery/pkg/types" clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/event" @@ -83,7 +82,7 @@ func TestAzureClusterToAzureMachinesMapper(t *testing.T) { { Name: clusterName, Kind: "Cluster", - APIVersion: clusterv1.GroupVersion.String(), + APIVersion: clusterv1beta1.GroupVersion.String(), }, }, }, @@ -94,7 +93,7 @@ func TestAzureClusterToAzureMachinesMapper(t *testing.T) { func TestGetCloudProviderConfig(t *testing.T) { g := NewWithT(t) scheme := runtime.NewScheme() - _ = clusterv1.AddToScheme(scheme) + _ = clusterv1beta1.AddToScheme(scheme) _ = infrav1.AddToScheme(scheme) _ = corev1.AddToScheme(scheme) @@ -107,7 +106,7 @@ func TestGetCloudProviderConfig(t *testing.T) { g.Expect((&infrav1.AzureClusterWebhook{}).Default(t.Context(), azureClusterCustomVnet)).NotTo(HaveOccurred()) cases := map[string]struct { - cluster *clusterv1.Cluster + cluster *clusterv1beta1.Cluster azureCluster *infrav1.AzureCluster identityType infrav1.VMIdentity identityID string @@ -376,15 +375,15 @@ func setupScheme(g *WithT) *runtime.Scheme { scheme := runtime.NewScheme() g.Expect(clientgoscheme.AddToScheme(scheme)).To(Succeed()) g.Expect(infrav1.AddToScheme(scheme)).To(Succeed()) - 
g.Expect(clusterv1.AddToScheme(scheme)).To(Succeed()) + g.Expect(clusterv1beta1.AddToScheme(scheme)).To(Succeed()) return scheme } -func newMachine(clusterName, machineName string) *clusterv1.Machine { - return &clusterv1.Machine{ +func newMachine(clusterName, machineName string) *clusterv1beta1.Machine { + return &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - clusterv1.ClusterNameLabel: clusterName, + clusterv1beta1.ClusterNameLabel: clusterName, }, Name: machineName, Namespace: "default", @@ -392,7 +391,7 @@ func newMachine(clusterName, machineName string) *clusterv1.Machine { } } -func newMachineWithInfrastructureRef(clusterName, machineName string) *clusterv1.Machine { +func newMachineWithInfrastructureRef(clusterName, machineName string) *clusterv1beta1.Machine { m := newMachine(clusterName, machineName) m.Spec.InfrastructureRef = corev1.ObjectReference{ Kind: "AzureMachine", @@ -403,8 +402,8 @@ func newMachineWithInfrastructureRef(clusterName, machineName string) *clusterv1 return m } -func newCluster(name string) *clusterv1.Cluster { - return &clusterv1.Cluster{ +func newCluster(name string) *clusterv1beta1.Cluster { + return &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: "default", @@ -903,7 +902,7 @@ func TestAzureManagedClusterToAzureManagedMachinePoolsMapper(t *testing.T) { { Name: clusterName, Kind: "Cluster", - APIVersion: clusterv1.GroupVersion.String(), + APIVersion: clusterv1beta1.GroupVersion.String(), }, }, }, @@ -968,7 +967,7 @@ func TestAzureManagedControlPlaneToAzureManagedMachinePoolsMapper(t *testing.T) { Name: cluster.Name, Kind: "Cluster", - APIVersion: clusterv1.GroupVersion.String(), + APIVersion: clusterv1beta1.GroupVersion.String(), }, }, }, @@ -1130,7 +1129,7 @@ func TestAzureManagedClusterToAzureManagedControlPlaneMapper(t *testing.T) { { Name: cluster.Name, Kind: "Cluster", - APIVersion: clusterv1.GroupVersion.String(), + APIVersion: clusterv1beta1.GroupVersion.String(), }, }, }, @@ -1159,7 +1158,7 @@ func TestAzureManagedControlPlaneToAzureManagedClusterMapper(t *testing.T) { { Name: cluster.Name, Kind: "Cluster", - APIVersion: clusterv1.GroupVersion.String(), + APIVersion: clusterv1beta1.GroupVersion.String(), }, }, }, @@ -1199,7 +1198,7 @@ func TestAzureManagedControlPlaneToAzureManagedClusterMapper(t *testing.T) { { Name: cluster.Name, Kind: "Cluster", - APIVersion: clusterv1.GroupVersion.String(), + APIVersion: clusterv1beta1.GroupVersion.String(), }, }, }, @@ -1224,7 +1223,7 @@ func newAzureManagedControlPlane(cpName string) *infrav1.AzureManagedControlPlan } } -func newManagedMachinePoolInfraReference(clusterName, poolName string) *expv1.MachinePool { +func newManagedMachinePoolInfraReference(clusterName, poolName string) *clusterv1beta1.MachinePool { m := newMachinePool(clusterName, poolName) m.Spec.ClusterName = clusterName m.Spec.Template.Spec.InfrastructureRef = corev1.ObjectReference{ @@ -1244,7 +1243,7 @@ func newAzureManagedMachinePool(clusterName, poolName, mode string) *infrav1.Azu return &infrav1.AzureManagedMachinePool{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - clusterv1.ClusterNameLabel: clusterName, + clusterv1beta1.ClusterNameLabel: clusterName, }, Name: poolName, Namespace: "default", @@ -1267,22 +1266,22 @@ func newAzureManagedMachinePool(clusterName, poolName, mode string) *infrav1.Azu } } -func newMachinePool(clusterName, poolName string) *expv1.MachinePool { - return &expv1.MachinePool{ +func newMachinePool(clusterName, poolName string) 
*clusterv1beta1.MachinePool { + return &clusterv1beta1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - clusterv1.ClusterNameLabel: clusterName, + clusterv1beta1.ClusterNameLabel: clusterName, }, Name: poolName, Namespace: "default", }, - Spec: expv1.MachinePoolSpec{ + Spec: clusterv1beta1.MachinePoolSpec{ Replicas: ptr.To[int32](2), }, } } -func newManagedMachinePoolWithInfrastructureRef(clusterName, poolName string) *expv1.MachinePool { +func newManagedMachinePoolWithInfrastructureRef(clusterName, poolName string) *clusterv1beta1.MachinePool { m := newMachinePool(clusterName, poolName) m.Spec.Template.Spec.InfrastructureRef = corev1.ObjectReference{ Kind: "AzureManagedMachinePool", @@ -1376,11 +1375,11 @@ func TestClusterPauseChangeAndInfrastructureReady(t *testing.T) { { name: "create cluster infra not ready, not paused", event: event.CreateEvent{ - Object: &clusterv1.Cluster{ - Spec: clusterv1.ClusterSpec{ + Object: &clusterv1beta1.Cluster{ + Spec: clusterv1beta1.ClusterSpec{ Paused: false, }, - Status: clusterv1.ClusterStatus{ + Status: clusterv1beta1.ClusterStatus{ InfrastructureReady: false, }, }, @@ -1390,11 +1389,11 @@ func TestClusterPauseChangeAndInfrastructureReady(t *testing.T) { { name: "create cluster infra ready, not paused", event: event.CreateEvent{ - Object: &clusterv1.Cluster{ - Spec: clusterv1.ClusterSpec{ + Object: &clusterv1beta1.Cluster{ + Spec: clusterv1beta1.ClusterSpec{ Paused: false, }, - Status: clusterv1.ClusterStatus{ + Status: clusterv1beta1.ClusterStatus{ InfrastructureReady: true, }, }, @@ -1404,11 +1403,11 @@ func TestClusterPauseChangeAndInfrastructureReady(t *testing.T) { { name: "create cluster infra not ready, paused", event: event.CreateEvent{ - Object: &clusterv1.Cluster{ - Spec: clusterv1.ClusterSpec{ + Object: &clusterv1beta1.Cluster{ + Spec: clusterv1beta1.ClusterSpec{ Paused: true, }, - Status: clusterv1.ClusterStatus{ + Status: clusterv1beta1.ClusterStatus{ InfrastructureReady: false, }, }, @@ -1418,11 +1417,11 @@ func TestClusterPauseChangeAndInfrastructureReady(t *testing.T) { { name: "create cluster infra ready, paused", event: event.CreateEvent{ - Object: &clusterv1.Cluster{ - Spec: clusterv1.ClusterSpec{ + Object: &clusterv1beta1.Cluster{ + Spec: clusterv1beta1.ClusterSpec{ Paused: true, }, - Status: clusterv1.ClusterStatus{ + Status: clusterv1beta1.ClusterStatus{ InfrastructureReady: true, }, }, @@ -1432,13 +1431,13 @@ func TestClusterPauseChangeAndInfrastructureReady(t *testing.T) { { name: "update cluster infra ready true->true", event: event.UpdateEvent{ - ObjectOld: &clusterv1.Cluster{ - Status: clusterv1.ClusterStatus{ + ObjectOld: &clusterv1beta1.Cluster{ + Status: clusterv1beta1.ClusterStatus{ InfrastructureReady: true, }, }, - ObjectNew: &clusterv1.Cluster{ - Status: clusterv1.ClusterStatus{ + ObjectNew: &clusterv1beta1.Cluster{ + Status: clusterv1beta1.ClusterStatus{ InfrastructureReady: true, }, }, @@ -1448,13 +1447,13 @@ func TestClusterPauseChangeAndInfrastructureReady(t *testing.T) { { name: "update cluster infra ready false->true", event: event.UpdateEvent{ - ObjectOld: &clusterv1.Cluster{ - Status: clusterv1.ClusterStatus{ + ObjectOld: &clusterv1beta1.Cluster{ + Status: clusterv1beta1.ClusterStatus{ InfrastructureReady: false, }, }, - ObjectNew: &clusterv1.Cluster{ - Status: clusterv1.ClusterStatus{ + ObjectNew: &clusterv1beta1.Cluster{ + Status: clusterv1beta1.ClusterStatus{ InfrastructureReady: true, }, }, @@ -1464,13 +1463,13 @@ func TestClusterPauseChangeAndInfrastructureReady(t *testing.T) { { 
name: "update cluster infra ready true->false", event: event.UpdateEvent{ - ObjectOld: &clusterv1.Cluster{ - Status: clusterv1.ClusterStatus{ + ObjectOld: &clusterv1beta1.Cluster{ + Status: clusterv1beta1.ClusterStatus{ InfrastructureReady: true, }, }, - ObjectNew: &clusterv1.Cluster{ - Status: clusterv1.ClusterStatus{ + ObjectNew: &clusterv1beta1.Cluster{ + Status: clusterv1beta1.ClusterStatus{ InfrastructureReady: false, }, }, @@ -1480,13 +1479,13 @@ func TestClusterPauseChangeAndInfrastructureReady(t *testing.T) { { name: "update cluster infra ready false->false", event: event.UpdateEvent{ - ObjectOld: &clusterv1.Cluster{ - Status: clusterv1.ClusterStatus{ + ObjectOld: &clusterv1beta1.Cluster{ + Status: clusterv1beta1.ClusterStatus{ InfrastructureReady: false, }, }, - ObjectNew: &clusterv1.Cluster{ - Status: clusterv1.ClusterStatus{ + ObjectNew: &clusterv1beta1.Cluster{ + Status: clusterv1beta1.ClusterStatus{ InfrastructureReady: false, }, }, @@ -1496,13 +1495,13 @@ func TestClusterPauseChangeAndInfrastructureReady(t *testing.T) { { name: "update cluster paused false->false", event: event.UpdateEvent{ - ObjectOld: &clusterv1.Cluster{ - Spec: clusterv1.ClusterSpec{ + ObjectOld: &clusterv1beta1.Cluster{ + Spec: clusterv1beta1.ClusterSpec{ Paused: false, }, }, - ObjectNew: &clusterv1.Cluster{ - Spec: clusterv1.ClusterSpec{ + ObjectNew: &clusterv1beta1.Cluster{ + Spec: clusterv1beta1.ClusterSpec{ Paused: false, }, }, @@ -1512,13 +1511,13 @@ func TestClusterPauseChangeAndInfrastructureReady(t *testing.T) { { name: "update cluster paused false->true", event: event.UpdateEvent{ - ObjectOld: &clusterv1.Cluster{ - Spec: clusterv1.ClusterSpec{ + ObjectOld: &clusterv1beta1.Cluster{ + Spec: clusterv1beta1.ClusterSpec{ Paused: false, }, }, - ObjectNew: &clusterv1.Cluster{ - Spec: clusterv1.ClusterSpec{ + ObjectNew: &clusterv1beta1.Cluster{ + Spec: clusterv1beta1.ClusterSpec{ Paused: true, }, }, @@ -1528,13 +1527,13 @@ func TestClusterPauseChangeAndInfrastructureReady(t *testing.T) { { name: "update cluster paused true->false", event: event.UpdateEvent{ - ObjectOld: &clusterv1.Cluster{ - Spec: clusterv1.ClusterSpec{ + ObjectOld: &clusterv1beta1.Cluster{ + Spec: clusterv1beta1.ClusterSpec{ Paused: true, }, }, - ObjectNew: &clusterv1.Cluster{ - Spec: clusterv1.ClusterSpec{ + ObjectNew: &clusterv1beta1.Cluster{ + Spec: clusterv1beta1.ClusterSpec{ Paused: false, }, }, @@ -1544,13 +1543,13 @@ func TestClusterPauseChangeAndInfrastructureReady(t *testing.T) { { name: "update cluster paused true->true", event: event.UpdateEvent{ - ObjectOld: &clusterv1.Cluster{ - Spec: clusterv1.ClusterSpec{ + ObjectOld: &clusterv1beta1.Cluster{ + Spec: clusterv1beta1.ClusterSpec{ Paused: true, }, }, - ObjectNew: &clusterv1.Cluster{ - Spec: clusterv1.ClusterSpec{ + ObjectNew: &clusterv1beta1.Cluster{ + Spec: clusterv1beta1.ClusterSpec{ Paused: true, }, }, diff --git a/controllers/managedclusteradopt_controller.go b/controllers/managedclusteradopt_controller.go index ee28ad5efcd..73b89304444 100644 --- a/controllers/managedclusteradopt_controller.go +++ b/controllers/managedclusteradopt_controller.go @@ -25,7 +25,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -96,12 +96,12 @@ func (r 
*ManagedClusterAdoptReconciler) Reconcile(ctx context.Context, req ctrl. log.Info("adopting") - cluster := &clusterv1.Cluster{ + cluster := &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Namespace: managedCluster.Namespace, Name: managedCluster.Name, }, - Spec: clusterv1.ClusterSpec{ + Spec: clusterv1beta1.ClusterSpec{ InfrastructureRef: &corev1.ObjectReference{ APIVersion: infrav1.GroupVersion.Identifier(), Kind: infrav1.AzureASOManagedClusterKind, diff --git a/controllers/managedclusteradopt_controller_test.go b/controllers/managedclusteradopt_controller_test.go index 8eaad1049c4..6d35bdc0ed4 100644 --- a/controllers/managedclusteradopt_controller_test.go +++ b/controllers/managedclusteradopt_controller_test.go @@ -26,7 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -67,7 +67,7 @@ func TestManagedClusterAdoptReconcile(t *testing.T) { s := runtime.NewScheme() err := asocontainerservicev1.AddToScheme(s) g.Expect(err).ToNot(HaveOccurred()) - err = clusterv1.AddToScheme(s) + err = clusterv1beta1.AddToScheme(s) g.Expect(err).ToNot(HaveOccurred()) err = asoresourcesv1.AddToScheme(s) g.Expect(err).ToNot(HaveOccurred()) diff --git a/docs/book/src/developers/getting-started-with-capi-operator.md b/docs/book/src/developers/getting-started-with-capi-operator.md index cb88381641c..e19f960ed2e 100644 --- a/docs/book/src/developers/getting-started-with-capi-operator.md +++ b/docs/book/src/developers/getting-started-with-capi-operator.md @@ -120,7 +120,7 @@ helm install cert-manager jetstack/cert-manager --namespace cert-manager --creat Create a `values.yaml` file for the CAPI Operator Helm chart like so: ```yaml -core: "cluster-api:v1.10.7" +core: "cluster-api:v1.11.2" infrastructure: "azure:v1.17.2" addon: "helm:v0.4.1" manager: diff --git a/exp/api/v1beta1/azuremachinepool_default_test.go b/exp/api/v1beta1/azuremachinepool_default_test.go index 03fbc4a3fa5..fafaa1be226 100644 --- a/exp/api/v1beta1/azuremachinepool_default_test.go +++ b/exp/api/v1beta1/azuremachinepool_default_test.go @@ -26,8 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client/fake" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" @@ -139,18 +138,17 @@ func TestAzureMachinePool_SetIdentityDefaults(t *testing.T) { scheme := runtime.NewScheme() _ = AddToScheme(scheme) _ = infrav1.AddToScheme(scheme) - _ = clusterv1.AddToScheme(scheme) - _ = expv1.AddToScheme(scheme) + _ = clusterv1beta1.AddToScheme(scheme) - machinePool := &expv1.MachinePool{ + machinePool := &clusterv1beta1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "pool1", Namespace: "default", Labels: map[string]string{ - clusterv1.ClusterNameLabel: "testcluster", + clusterv1beta1.ClusterNameLabel: "testcluster", }, }, - Spec: expv1.MachinePoolSpec{ + Spec: clusterv1beta1.MachinePoolSpec{ ClusterName: "testcluster", }, } @@ -165,12 +163,12 @@ func TestAzureMachinePool_SetIdentityDefaults(t *testing.T) { }, }, } - cluster := &clusterv1.Cluster{ + cluster := &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ 
Name: "testcluster", Namespace: "default", }, - Spec: clusterv1.ClusterSpec{ + Spec: clusterv1beta1.ClusterSpec{ InfrastructureRef: &corev1.ObjectReference{ Name: "testcluster", Namespace: "default", diff --git a/exp/api/v1beta1/azuremachinepool_types.go b/exp/api/v1beta1/azuremachinepool_types.go index 4856280dda0..398e426f86c 100644 --- a/exp/api/v1beta1/azuremachinepool_types.go +++ b/exp/api/v1beta1/azuremachinepool_types.go @@ -19,7 +19,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" ) @@ -309,7 +309,7 @@ type ( // Conditions defines current service state of the AzureMachinePool. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` // LongRunningOperationStates saves the state for Azure long-running operations so they can be continued on the // next reconciliation loop. @@ -382,12 +382,12 @@ type ( ) // GetConditions returns the list of conditions for an AzureMachinePool API object. -func (amp *AzureMachinePool) GetConditions() clusterv1.Conditions { +func (amp *AzureMachinePool) GetConditions() clusterv1beta1.Conditions { return amp.Status.Conditions } // SetConditions will set the given conditions on an AzureMachinePool object. -func (amp *AzureMachinePool) SetConditions(conditions clusterv1.Conditions) { +func (amp *AzureMachinePool) SetConditions(conditions clusterv1beta1.Conditions) { amp.Status.Conditions = conditions } diff --git a/exp/api/v1beta1/azuremachinepool_webhook_test.go b/exp/api/v1beta1/azuremachinepool_webhook_test.go index 4185fef7167..cd8ca3a43bf 100644 --- a/exp/api/v1beta1/azuremachinepool_webhook_test.go +++ b/exp/api/v1beta1/azuremachinepool_webhook_test.go @@ -35,8 +35,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" utilfeature "k8s.io/component-base/featuregate/testing" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" capifeature "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/controller-runtime/pkg/client" @@ -57,7 +56,7 @@ type mockClient struct { } func (m mockClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { - obj.(*expv1.MachinePool).Spec.Template.Spec.Version = &m.Version + obj.(*clusterv1beta1.MachinePool).Spec.Template.Spec.Version = &m.Version return nil } @@ -65,9 +64,9 @@ func (m mockClient) List(ctx context.Context, list client.ObjectList, opts ...cl if m.ReturnError { return errors.New("MachinePool.cluster.x-k8s.io \"mock-machinepool-mp-0\" not found") } - mp := &expv1.MachinePool{} + mp := &clusterv1beta1.MachinePool{} mp.Spec.Template.Spec.Version = &m.Version - list.(*expv1.MachinePoolList).Items = []expv1.MachinePool{*mp} + list.(*clusterv1beta1.MachinePoolList).Items = []clusterv1beta1.MachinePool{*mp} return nil } @@ -281,7 +280,7 @@ func (m mockDefaultClient) Get(ctx context.Context, key client.ObjectKey, obj cl switch obj := obj.(type) { case *infrav1.AzureCluster: obj.Spec.SubscriptionID = m.SubscriptionID - case *clusterv1.Cluster: + case *clusterv1beta1.Cluster: obj.Spec.InfrastructureRef = &corev1.ObjectReference{ Kind: infrav1.AzureClusterKind, Name: "test-cluster", @@ -293,11 +292,11 @@ func (m mockDefaultClient) Get(ctx 
context.Context, key client.ObjectKey, obj cl } func (m mockDefaultClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - list.(*expv1.MachinePoolList).Items = []expv1.MachinePool{ + list.(*clusterv1beta1.MachinePoolList).Items = []clusterv1beta1.MachinePool{ { - Spec: expv1.MachinePoolSpec{ - Template: clusterv1.MachineTemplateSpec{ - Spec: clusterv1.MachineSpec{ + Spec: clusterv1beta1.MachinePoolSpec{ + Template: clusterv1beta1.MachineTemplateSpec{ + Spec: clusterv1beta1.MachineSpec{ InfrastructureRef: corev1.ObjectReference{ Name: m.Name, }, diff --git a/exp/api/v1beta1/azuremachinepoolmachine_types.go b/exp/api/v1beta1/azuremachinepoolmachine_types.go index bede7e9b06e..c95f6c012ce 100644 --- a/exp/api/v1beta1/azuremachinepoolmachine_types.go +++ b/exp/api/v1beta1/azuremachinepoolmachine_types.go @@ -19,7 +19,7 @@ package v1beta1 import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" ) @@ -84,7 +84,7 @@ type ( // Conditions defines current service state of the AzureMachinePool. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` // LongRunningOperationStates saves the state for Azure long running operations so they can be continued on the // next reconciliation loop. @@ -133,12 +133,12 @@ type ( ) // GetConditions returns the list of conditions for an AzureMachinePool API object. -func (ampm *AzureMachinePoolMachine) GetConditions() clusterv1.Conditions { +func (ampm *AzureMachinePoolMachine) GetConditions() clusterv1beta1.Conditions { return ampm.Status.Conditions } // SetConditions will set the given conditions on an AzureMachinePool object. -func (ampm *AzureMachinePoolMachine) SetConditions(conditions clusterv1.Conditions) { +func (ampm *AzureMachinePoolMachine) SetConditions(conditions clusterv1beta1.Conditions) { ampm.Status.Conditions = conditions } diff --git a/exp/api/v1beta1/zz_generated.deepcopy.go b/exp/api/v1beta1/zz_generated.deepcopy.go index c887e79abec..2083b59c5b5 100644 --- a/exp/api/v1beta1/zz_generated.deepcopy.go +++ b/exp/api/v1beta1/zz_generated.deepcopy.go @@ -25,7 +25,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" apiv1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" - cluster_apiapiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + corev1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
@@ -226,7 +226,7 @@ func (in *AzureMachinePoolMachineStatus) DeepCopyInto(out *AzureMachinePoolMachi } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(cluster_apiapiv1beta1.Conditions, len(*in)) + *out = make(corev1beta1.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -405,7 +405,7 @@ func (in *AzureMachinePoolStatus) DeepCopyInto(out *AzureMachinePoolStatus) { } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(cluster_apiapiv1beta1.Conditions, len(*in)) + *out = make(corev1beta1.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/exp/controllers/azuremachinepool_controller.go b/exp/controllers/azuremachinepool_controller.go index 3abec0c2af4..c37a4b68879 100644 --- a/exp/controllers/azuremachinepool_controller.go +++ b/exp/controllers/azuremachinepool_controller.go @@ -28,11 +28,9 @@ import ( kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/controllers/external" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" @@ -49,6 +47,7 @@ import ( "sigs.k8s.io/cluster-api-provider-azure/pkg/coalescing" "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" "sigs.k8s.io/cluster-api-provider-azure/util/tele" + clusterv1beta1util "sigs.k8s.io/cluster-api-provider-azure/util/v1beta1" ) type ( @@ -122,7 +121,7 @@ func (ampr *AzureMachinePoolReconciler) SetupWithManager(ctx context.Context, mg WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), log, ampr.WatchFilterValue)). // watch for changes in CAPI MachinePool resources Watches( - &expv1.MachinePool{}, + &clusterv1beta1.MachinePool{}, handler.EnqueueRequestsFromMapFunc(MachinePoolToInfrastructureMapFunc(infrav1exp.GroupVersion.WithKind(infrav1.AzureMachinePoolKind), log)), ). // watch for changes in AzureCluster resources @@ -143,9 +142,9 @@ func (ampr *AzureMachinePoolReconciler) SetupWithManager(ctx context.Context, mg predicates.ResourceHasFilterLabel(mgr.GetScheme(), log, ampr.WatchFilterValue), ), ). - // Add a watch on clusterv1.Cluster object for unpause & ready notifications. + // Add a watch on clusterv1beta1.Cluster object for unpause & ready notifications. Watches( - &clusterv1.Cluster{}, + &clusterv1beta1.Cluster{}, handler.EnqueueRequestsFromMapFunc(azureMachinePoolMapper), builder.WithPredicates( infracontroller.ClusterPauseChangeAndInfrastructureReady(mgr.GetScheme(), log), @@ -214,7 +213,7 @@ func (ampr *AzureMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl. logger = logger.WithValues("machinePool", machinePool.Name) // Fetch the Cluster. - cluster, err := util.GetClusterFromMetadata(ctx, ampr.Client, machinePool.ObjectMeta) + cluster, err := clusterv1beta1util.GetClusterFromMetadata(ctx, ampr.Client, machinePool.ObjectMeta) if err != nil { logger.V(2).Info("MachinePool is missing cluster label or cluster does not exist") return reconcile.Result{}, nil @@ -246,7 +245,7 @@ func (ampr *AzureMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl. }() // Return early if the object or Cluster is paused. 
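
The pause check immediately below (and the GetClusterFromMetadata lookup earlier in Reconcile) switches from CAPI's util and util/annotations packages to CAPZ's own util/v1beta1 package, presumably because upstream's helpers now operate on the new v1beta2 types. A minimal sketch of the call pattern, using a hypothetical ownerClusterIfActive wrapper:

```go
// Assumed wrapper for illustration only; the underlying calls appear
// verbatim in this patch.
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	clusterv1beta1util "sigs.k8s.io/cluster-api-provider-azure/util/v1beta1"
)

// ownerClusterIfActive resolves the owning Cluster via the cluster-name
// label and reports whether reconciliation may proceed (neither the
// object nor the Cluster is paused).
func ownerClusterIfActive(ctx context.Context, c client.Client, obj client.Object, meta metav1.ObjectMeta) (*clusterv1beta1.Cluster, bool, error) {
	cluster, err := clusterv1beta1util.GetClusterFromMetadata(ctx, c, meta)
	if err != nil {
		return nil, false, err // missing cluster label or Cluster not found
	}
	return cluster, !clusterv1beta1util.IsPaused(cluster, obj), nil
}
```
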
- if annotations.IsPaused(cluster, azMachinePool) { + if clusterv1beta1util.IsPaused(cluster, azMachinePool) { logger.V(2).Info("AzureMachinePool or linked Cluster is marked as paused. Won't reconcile normally") return ampr.reconcilePause(ctx, machinePoolScope) } @@ -260,7 +259,7 @@ func (ampr *AzureMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl. return ampr.reconcileNormal(ctx, machinePoolScope, cluster) } -func (ampr *AzureMachinePoolReconciler) reconcileNormal(ctx context.Context, machinePoolScope *scope.MachinePoolScope, cluster *clusterv1.Cluster) (_ reconcile.Result, reterr error) { +func (ampr *AzureMachinePoolReconciler) reconcileNormal(ctx context.Context, machinePoolScope *scope.MachinePoolScope, cluster *clusterv1beta1.Cluster) (_ reconcile.Result, reterr error) { ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.AzureMachinePoolReconciler.reconcileNormal") defer done() @@ -273,7 +272,7 @@ func (ampr *AzureMachinePoolReconciler) reconcileNormal(ctx context.Context, mac } // Register the finalizer immediately to avoid orphaning Azure resources on delete - needsPatch := controllerutil.AddFinalizer(machinePoolScope.AzureMachinePool, expv1.MachinePoolFinalizer) + needsPatch := controllerutil.AddFinalizer(machinePoolScope.AzureMachinePool, clusterv1beta1.MachinePoolFinalizer) needsPatch = machinePoolScope.SetInfrastructureMachineKind() || needsPatch // Register the block-move annotation immediately to avoid moving un-paused ASO resources needsPatch = infracontroller.AddBlockMoveAnnotation(machinePoolScope.AzureMachinePool) || needsPatch @@ -451,6 +450,6 @@ func (ampr *AzureMachinePoolReconciler) reconcileDelete(ctx context.Context, mac // Delete succeeded, remove finalizer log.V(4).Info("removing finalizer for AzureMachinePool") - controllerutil.RemoveFinalizer(machinePoolScope.AzureMachinePool, expv1.MachinePoolFinalizer) + controllerutil.RemoveFinalizer(machinePoolScope.AzureMachinePool, clusterv1beta1.MachinePoolFinalizer) return reconcile.Result{}, nil } diff --git a/exp/controllers/azuremachinepool_controller_test.go b/exp/controllers/azuremachinepool_controller_test.go index 21582a59246..91b8bf64154 100644 --- a/exp/controllers/azuremachinepool_controller_test.go +++ b/exp/controllers/azuremachinepool_controller_test.go @@ -26,8 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -67,9 +66,8 @@ func TestAzureMachinePoolReconcilePaused(t *testing.T) { ctx := t.Context() sb := runtime.NewSchemeBuilder( - clusterv1.AddToScheme, + clusterv1beta1.AddToScheme, infrav1.AddToScheme, - expv1.AddToScheme, infrav1exp.AddToScheme, corev1.AddToScheme, ) @@ -85,12 +83,12 @@ func TestAzureMachinePoolReconcilePaused(t *testing.T) { name := test.RandomName("paused", 10) namespace := "default" - cluster := &clusterv1.Cluster{ + cluster := &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, }, - Spec: clusterv1.ClusterSpec{ + Spec: clusterv1beta1.ClusterSpec{ Paused: true, InfrastructureRef: &corev1.ObjectReference{ Kind: "AzureCluster", @@ -145,18 +143,18 @@ func TestAzureMachinePoolReconcilePaused(t *testing.T) { g.Expect(c.Create(ctx, fakeIdentity)).To(Succeed()) 
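
In the paused-reconcile test above, scheme registration also shrinks: with MachinePool folded into the core API group, one AddToScheme call covers what previously required both clusterv1 and expv1. A minimal sketch (hypothetical newFakeClient helper; fake client from controller-runtime):

```go
// Hypothetical helper for illustration; mirrors the SchemeBuilder change
// in the test above.
package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

// newFakeClient registers the core CAPI scheme once; Cluster and
// MachinePool now live in the same group, so no expv1.AddToScheme remains.
func newFakeClient() (client.Client, error) {
	scheme := runtime.NewScheme()
	if err := clusterv1beta1.AddToScheme(scheme); err != nil {
		return nil, err
	}
	return fake.NewClientBuilder().WithScheme(scheme).Build(), nil
}
```
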
g.Expect(c.Create(ctx, fakeSecret)).To(Succeed()) - mp := &expv1.MachinePool{ + mp := &clusterv1beta1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, Labels: map[string]string{ - clusterv1.ClusterNameLabel: name, + clusterv1beta1.ClusterNameLabel: name, }, }, - Spec: expv1.MachinePoolSpec{ + Spec: clusterv1beta1.MachinePoolSpec{ ClusterName: name, - Template: clusterv1.MachineTemplateSpec{ - Spec: clusterv1.MachineSpec{ + Template: clusterv1beta1.MachineTemplateSpec{ + Spec: clusterv1beta1.MachineSpec{ ClusterName: name, }, }, @@ -171,7 +169,7 @@ func TestAzureMachinePoolReconcilePaused(t *testing.T) { OwnerReferences: []metav1.OwnerReference{ { Kind: "MachinePool", - APIVersion: expv1.GroupVersion.String(), + APIVersion: clusterv1beta1.GroupVersion.String(), Name: mp.Name, }, }, diff --git a/exp/controllers/azuremachinepool_controller_unit_test.go b/exp/controllers/azuremachinepool_controller_unit_test.go index f0c1e05248a..0948b41f28a 100644 --- a/exp/controllers/azuremachinepool_controller_unit_test.go +++ b/exp/controllers/azuremachinepool_controller_unit_test.go @@ -25,8 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure/mock_azure" @@ -79,8 +78,7 @@ func Test_newAzureMachinePoolService(t *testing.T) { func newScheme(g *GomegaWithT) *runtime.Scheme { scheme := runtime.NewScheme() for _, f := range []func(*runtime.Scheme) error{ - clusterv1.AddToScheme, - expv1.AddToScheme, + clusterv1beta1.AddToScheme, infrav1.AddToScheme, infrav1exp.AddToScheme, } { @@ -89,16 +87,16 @@ func newScheme(g *GomegaWithT) *runtime.Scheme { return scheme } -func newMachinePool(clusterName, poolName string) *expv1.MachinePool { - return &expv1.MachinePool{ +func newMachinePool(clusterName, poolName string) *clusterv1beta1.MachinePool { + return &clusterv1beta1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - clusterv1.ClusterNameLabel: clusterName, + clusterv1beta1.ClusterNameLabel: clusterName, }, Name: poolName, Namespace: "default", }, - Spec: expv1.MachinePoolSpec{ + Spec: clusterv1beta1.MachinePoolSpec{ Replicas: ptr.To[int32](2), }, } @@ -108,7 +106,7 @@ func newAzureMachinePool(clusterName, poolName string) *infrav1exp.AzureMachineP return &infrav1exp.AzureMachinePool{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - clusterv1.ClusterNameLabel: clusterName, + clusterv1beta1.ClusterNameLabel: clusterName, }, Name: poolName, Namespace: "default", @@ -116,7 +114,7 @@ func newAzureMachinePool(clusterName, poolName string) *infrav1exp.AzureMachineP } } -func newMachinePoolWithInfrastructureRef(clusterName, poolName string) *expv1.MachinePool { +func newMachinePoolWithInfrastructureRef(clusterName, poolName string) *clusterv1beta1.MachinePool { m := newMachinePool(clusterName, poolName) m.Spec.Template.Spec.InfrastructureRef = corev1.ObjectReference{ Kind: infrav1.AzureMachinePoolKind, @@ -134,7 +132,7 @@ func newAzureCluster(clusterName string) *infrav1.AzureCluster { Namespace: "default", OwnerReferences: []metav1.OwnerReference{ { - APIVersion: clusterv1.GroupVersion.String(), + APIVersion: clusterv1beta1.GroupVersion.String(), Kind: "Cluster", Name: clusterName, }, @@ -143,8 +141,8 @@ func 
newAzureCluster(clusterName string) *infrav1.AzureCluster { } } -func newCluster(name string) *clusterv1.Cluster { - return &clusterv1.Cluster{ +func newCluster(name string) *clusterv1beta1.Cluster { + return &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: "default", diff --git a/exp/controllers/azuremachinepool_reconciler_test.go b/exp/controllers/azuremachinepool_reconciler_test.go index 29ed63a8fd8..4144a4dd3af 100644 --- a/exp/controllers/azuremachinepool_reconciler_test.go +++ b/exp/controllers/azuremachinepool_reconciler_test.go @@ -23,8 +23,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" . "github.com/onsi/gomega" "go.uber.org/mock/gomock" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -77,9 +76,9 @@ func TestAzureMachinePoolServiceReconcile(t *testing.T) { scope: &scope.MachinePoolScope{ ClusterScoper: &scope.ClusterScope{ AzureCluster: &infrav1.AzureCluster{}, - Cluster: &clusterv1.Cluster{}, + Cluster: &clusterv1beta1.Cluster{}, }, - MachinePool: &expv1.MachinePool{}, + MachinePool: &clusterv1beta1.MachinePool{}, AzureMachinePool: &infrav1exp.AzureMachinePool{ Spec: infrav1exp.AzureMachinePoolSpec{ Template: infrav1exp.AzureMachinePoolMachineTemplate{ @@ -218,9 +217,9 @@ func TestAzureMachinePoolServiceDelete(t *testing.T) { scope: &scope.MachinePoolScope{ ClusterScoper: &scope.ClusterScope{ AzureCluster: &infrav1.AzureCluster{}, - Cluster: &clusterv1.Cluster{}, + Cluster: &clusterv1beta1.Cluster{}, }, - MachinePool: &expv1.MachinePool{}, + MachinePool: &clusterv1beta1.MachinePool{}, AzureMachinePool: &infrav1exp.AzureMachinePool{}, }, services: []azure.ServiceReconciler{ diff --git a/exp/controllers/azuremachinepoolmachine_controller.go b/exp/controllers/azuremachinepoolmachine_controller.go index 5198fd27702..3e284587c81 100644 --- a/exp/controllers/azuremachinepoolmachine_controller.go +++ b/exp/controllers/azuremachinepoolmachine_controller.go @@ -26,10 +26,9 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/annotations" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" @@ -47,6 +46,7 @@ import ( "sigs.k8s.io/cluster-api-provider-azure/pkg/coalescing" "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" "sigs.k8s.io/cluster-api-provider-azure/util/tele" + clusterv1beta1util "sigs.k8s.io/cluster-api-provider-azure/util/v1beta1" ) type ( @@ -109,7 +109,7 @@ func (ampmr *AzureMachinePoolMachineController) SetupWithManager(ctx context.Con ). 
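
The import hunk above trades sigs.k8s.io/cluster-api/util/conditions for the relocated util/deprecated/v1beta1/conditions package, aliased v1beta1conditions to distinguish it from the newer v1beta2 helpers; it is used further below in reconcileNormal to read the bootstrap condition. A minimal sketch of that check (hypothetical bootstrapFailed helper):

```go
// Hypothetical helper for illustration; the Get call matches the one in
// reconcileNormal below.
package example

import (
	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
)

// bootstrapFailed reports whether the BootstrapSucceeded condition exists
// and carries the BootstrapFailed reason, in which case reconciliation
// stops retrying bootstrap.
func bootstrapFailed(obj v1beta1conditions.Getter) bool {
	c := v1beta1conditions.Get(obj, infrav1.BootstrapSucceededCondition)
	return c != nil && c.Reason == infrav1.BootstrapFailedReason
}
```
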
// Add a watch on CAPI Machines for MachinePool Machines Watches( - &clusterv1.Machine{}, + &clusterv1beta1.Machine{}, handler.EnqueueRequestsFromMapFunc(util.MachineToInfrastructureMapFunc(infrav1exp.GroupVersion.WithKind("AzureMachinePoolMachine"))), builder.WithPredicates( predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), log, ampmr.WatchFilterValue), @@ -155,7 +155,7 @@ func (ampmr *AzureMachinePoolMachineController) Reconcile(ctx context.Context, r logger.V(2).Info("Fetching cluster for AzureMachinePoolMachine", "ampm", azureMachine.Name) // Fetch the Cluster. - cluster, err := util.GetClusterFromMetadata(ctx, ampmr.Client, azureMachine.ObjectMeta) + cluster, err := clusterv1beta1util.GetClusterFromMetadata(ctx, ampmr.Client, azureMachine.ObjectMeta) if err != nil { logger.Info("AzureMachinePoolMachine is missing cluster label or cluster does not exist") return reconcile.Result{}, nil @@ -164,7 +164,7 @@ func (ampmr *AzureMachinePoolMachineController) Reconcile(ctx context.Context, r logger = logger.WithValues("cluster", cluster.Name) // Return early if the object or Cluster is paused. - if annotations.IsPaused(cluster, azureMachine) { + if clusterv1beta1util.IsPaused(cluster, azureMachine) { logger.Info("AzureMachinePoolMachine or linked Cluster is marked as paused. Won't reconcile") return ctrl.Result{}, nil } @@ -204,7 +204,7 @@ func (ampmr *AzureMachinePoolMachineController) Reconcile(ctx context.Context, r } // Fetch the CAPI Machine. - machine, err := util.GetOwnerMachine(ctx, ampmr.Client, azureMachine.ObjectMeta) + machine, err := clusterv1beta1util.GetOwnerMachine(ctx, ampmr.Client, azureMachine.ObjectMeta) if err != nil && !apierrors.IsNotFound(err) { return reconcile.Result{}, err } @@ -303,7 +303,7 @@ func (ampmr *AzureMachinePoolMachineController) reconcileNormal(ctx context.Cont log.V(2).Info(fmt.Sprintf("Scale Set VM is %s", state), "id", machineScope.ProviderID()) - bootstrappingCondition := conditions.Get(machineScope.AzureMachinePoolMachine, infrav1.BootstrapSucceededCondition) + bootstrappingCondition := v1beta1conditions.Get(machineScope.AzureMachinePoolMachine, infrav1.BootstrapSucceededCondition) if bootstrappingCondition != nil && bootstrappingCondition.Reason == infrav1.BootstrapFailedReason { return reconcile.Result{}, nil } diff --git a/exp/controllers/azuremachinepoolmachine_controller_test.go b/exp/controllers/azuremachinepoolmachine_controller_test.go index a03142dd83d..3a2503bfd45 100644 --- a/exp/controllers/azuremachinepoolmachine_controller_test.go +++ b/exp/controllers/azuremachinepoolmachine_controller_test.go @@ -28,8 +28,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -101,7 +100,7 @@ func TestAzureMachinePoolMachineReconciler_Reconcile(t *testing.T) { Verify: func(g *WithT, c client.Client, result ctrl.Result, err error) { g.Expect(err).NotTo(HaveOccurred()) - machine := &clusterv1.Machine{} + machine := &clusterv1beta1.Machine{} err = c.Get(t.Context(), types.NamespacedName{ Name: "ma1", Namespace: "default", @@ -139,8 +138,7 @@ func TestAzureMachinePoolMachineReconciler_Reconcile(t *testing.T) { scheme = func() *runtime.Scheme { s := runtime.NewScheme() for _, addTo := range []func(s 
*runtime.Scheme) error{ - clusterv1.AddToScheme, - expv1.AddToScheme, + clusterv1beta1.AddToScheme, infrav1.AddToScheme, infrav1exp.AddToScheme, corev1.AddToScheme, @@ -192,7 +190,7 @@ func getReadyMachinePoolMachineClusterObjects(ampmIsDeleting bool, ampmProvision }, } - cluster := &clusterv1.Cluster{ + cluster := &clusterv1beta1.Cluster{ TypeMeta: metav1.TypeMeta{ Kind: "Cluster", }, @@ -200,19 +198,19 @@ func getReadyMachinePoolMachineClusterObjects(ampmIsDeleting bool, ampmProvision Name: "cluster1", Namespace: "default", }, - Spec: clusterv1.ClusterSpec{ + Spec: clusterv1beta1.ClusterSpec{ InfrastructureRef: &corev1.ObjectReference{ Name: azCluster.Name, Namespace: "default", Kind: "AzureCluster", }, }, - Status: clusterv1.ClusterStatus{ + Status: clusterv1beta1.ClusterStatus{ InfrastructureReady: true, }, } - mp := &expv1.MachinePool{ + mp := &clusterv1beta1.MachinePool{ TypeMeta: metav1.TypeMeta{ Kind: "MachinePool", }, @@ -236,13 +234,13 @@ func getReadyMachinePoolMachineClusterObjects(ampmIsDeleting bool, ampmProvision { Name: mp.Name, Kind: "MachinePool", - APIVersion: expv1.GroupVersion.String(), + APIVersion: clusterv1beta1.GroupVersion.String(), }, }, }, } - ma := &clusterv1.Machine{ + ma := &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "ma1", Namespace: "default", @@ -250,7 +248,7 @@ func getReadyMachinePoolMachineClusterObjects(ampmIsDeleting bool, ampmProvision { Name: mp.Name, Kind: "MachinePool", - APIVersion: expv1.GroupVersion.String(), + APIVersion: clusterv1beta1.GroupVersion.String(), }, }, Labels: map[string]string{ @@ -265,7 +263,7 @@ func getReadyMachinePoolMachineClusterObjects(ampmIsDeleting bool, ampmProvision Namespace: "default", Finalizers: []string{"test"}, Labels: map[string]string{ - clusterv1.ClusterNameLabel: cluster.Name, + clusterv1beta1.ClusterNameLabel: cluster.Name, }, OwnerReferences: []metav1.OwnerReference{ { @@ -276,7 +274,7 @@ func getReadyMachinePoolMachineClusterObjects(ampmIsDeleting bool, ampmProvision { Name: ma.Name, Kind: "Machine", - APIVersion: clusterv1.GroupVersion.String(), + APIVersion: clusterv1beta1.GroupVersion.String(), }, }, }, @@ -342,7 +340,7 @@ func getDeletingMachinePoolObjects() []client.Object { }, } - cluster := &clusterv1.Cluster{ + cluster := &clusterv1beta1.Cluster{ TypeMeta: metav1.TypeMeta{ Kind: "Cluster", }, @@ -350,19 +348,19 @@ func getDeletingMachinePoolObjects() []client.Object { Name: "cluster1", Namespace: "default", }, - Spec: clusterv1.ClusterSpec{ + Spec: clusterv1beta1.ClusterSpec{ InfrastructureRef: &corev1.ObjectReference{ Name: azCluster.Name, Namespace: "default", Kind: "AzureCluster", }, }, - Status: clusterv1.ClusterStatus{ + Status: clusterv1beta1.ClusterStatus{ InfrastructureReady: true, }, } - mp := &expv1.MachinePool{ + mp := &clusterv1beta1.MachinePool{ TypeMeta: metav1.TypeMeta{ Kind: "MachinePool", }, @@ -394,7 +392,7 @@ func getDeletingMachinePoolObjects() []client.Object { { Name: mp.Name, Kind: "MachinePool", - APIVersion: expv1.GroupVersion.String(), + APIVersion: clusterv1beta1.GroupVersion.String(), }, }, }, @@ -409,7 +407,7 @@ func getDeletingMachinePoolObjects() []client.Object { }, Finalizers: []string{infrav1exp.AzureMachinePoolMachineFinalizer}, Labels: map[string]string{ - clusterv1.ClusterNameLabel: cluster.Name, + clusterv1beta1.ClusterNameLabel: cluster.Name, }, OwnerReferences: []metav1.OwnerReference{ { diff --git a/exp/controllers/helpers.go b/exp/controllers/helpers.go index 43f0a22e065..54083c64ab1 100644 --- a/exp/controllers/helpers.go +++ 
b/exp/controllers/helpers.go @@ -27,9 +27,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" - "sigs.k8s.io/cluster-api/util" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" @@ -42,6 +40,7 @@ import ( "sigs.k8s.io/cluster-api-provider-azure/controllers" infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" + clusterv1beta1util "sigs.k8s.io/cluster-api-provider-azure/util/v1beta1" ) // AzureClusterToAzureMachinePoolsMapper creates a mapping handler to transform AzureClusters into AzureMachinePools. The transform @@ -77,10 +76,10 @@ func AzureClusterToAzureMachinePoolsMapper(_ context.Context, c client.Client, s return nil } - machineList := &expv1.MachinePoolList{} + machineList := &clusterv1beta1.MachinePoolList{} machineList.SetGroupVersionKind(gvk) // list all of the requested objects within the cluster namespace with the cluster name label - if err := c.List(ctx, machineList, client.InNamespace(azCluster.Namespace), client.MatchingLabels{clusterv1.ClusterNameLabel: clusterName}); err != nil { + if err := c.List(ctx, machineList, client.InNamespace(azCluster.Namespace), client.MatchingLabels{clusterv1beta1.ClusterNameLabel: clusterName}); err != nil { log.V(4).Info(fmt.Sprintf("unable to list machine pools in cluster %s", clusterName)) return nil } @@ -130,10 +129,10 @@ func AzureManagedControlPlaneToAzureMachinePoolsMapper(_ context.Context, c clie return nil } - machineList := &expv1.MachinePoolList{} + machineList := &clusterv1beta1.MachinePoolList{} machineList.SetGroupVersionKind(gvk) // list all of the requested objects within the cluster namespace with the cluster name label - if err := c.List(ctx, machineList, client.InNamespace(azControlPlane.Namespace), client.MatchingLabels{clusterv1.ClusterNameLabel: clusterName}); err != nil { + if err := c.List(ctx, machineList, client.InNamespace(azControlPlane.Namespace), client.MatchingLabels{clusterv1beta1.ClusterNameLabel: clusterName}); err != nil { log.V(4).Info(fmt.Sprintf("unable to list machine pools in cluster %s", clusterName)) return nil } @@ -197,7 +196,7 @@ func AzureMachinePoolMachineMapper(scheme *runtime.Scheme, log logr.Logger) hand // MachinePool events and returns reconciliation requests for an infrastructure provider object. 
func MachinePoolToInfrastructureMapFunc(gvk schema.GroupVersionKind, log logr.Logger) handler.MapFunc { return func(_ context.Context, o client.Object) []reconcile.Request { - m, ok := o.(*expv1.MachinePool) + m, ok := o.(*clusterv1beta1.MachinePool) if !ok { log.V(4).Info("attempt to map incorrect type", "type", fmt.Sprintf("%T", o)) return nil @@ -237,7 +236,7 @@ func AzureClusterToAzureMachinePoolsFunc(_ context.Context, c client.Client, log } logWithValues := log.WithValues("AzureCluster", ac.Name, "Namespace", ac.Namespace) - cluster, err := util.GetOwnerCluster(ctx, c, ac.ObjectMeta) + cluster, err := clusterv1beta1util.GetOwnerCluster(ctx, c, ac.ObjectMeta) switch { case apierrors.IsNotFound(err) || cluster == nil: logWithValues.V(4).Info("owning cluster not found") @@ -247,7 +246,7 @@ func AzureClusterToAzureMachinePoolsFunc(_ context.Context, c client.Client, log return nil } - labels := map[string]string{clusterv1.ClusterNameLabel: cluster.Name} + labels := map[string]string{clusterv1beta1.ClusterNameLabel: cluster.Name} ampl := &infrav1exp.AzureMachinePoolList{} if err := c.List(ctx, ampl, client.InNamespace(ac.Namespace), client.MatchingLabels(labels)); err != nil { logWithValues.Error(err, "failed to list AzureMachinePools") @@ -283,7 +282,7 @@ func AzureMachinePoolToAzureMachinePoolMachines(_ context.Context, c client.Clie logWithValues := log.WithValues("AzureMachinePool", amp.Name, "Namespace", amp.Namespace) labels := map[string]string{ - clusterv1.ClusterNameLabel: amp.Labels[clusterv1.ClusterNameLabel], + clusterv1beta1.ClusterNameLabel: amp.Labels[clusterv1beta1.ClusterNameLabel], infrav1exp.MachinePoolNameLabel: amp.Name, } ampml := &infrav1exp.AzureMachinePoolMachineList{} @@ -396,7 +395,7 @@ func BootstrapConfigToInfrastructureMapFunc(c client.Client, log logr.Logger) ha Namespace: o.GetNamespace(), Name: mpName, } - mp := &expv1.MachinePool{} + mp := &clusterv1beta1.MachinePool{} if err := c.Get(ctx, mpKey, mp); err != nil { if !apierrors.IsNotFound(err) { log.Error(err, "failed to fetch MachinePool to validate Bootstrap.ConfigRef") diff --git a/exp/controllers/helpers_test.go b/exp/controllers/helpers_test.go index 37dc1fa0e03..8c9d455b69c 100644 --- a/exp/controllers/helpers_test.go +++ b/exp/controllers/helpers_test.go @@ -29,9 +29,8 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - kubeadmv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + bootstrapv1beta1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -51,12 +50,11 @@ var _ = Describe("BootstrapConfigToInfrastructureMapFunc", func() { It("should map bootstrap config to machine pool", func() { ctx := context.Background() scheme := runtime.NewScheme() - Expect(kubeadmv1.AddToScheme(scheme)).Should(Succeed()) - Expect(expv1.AddToScheme(scheme)).Should(Succeed()) - Expect(clusterv1.AddToScheme(scheme)).Should(Succeed()) + Expect(bootstrapv1beta1.AddToScheme(scheme)).Should(Succeed()) + Expect(clusterv1beta1.AddToScheme(scheme)).Should(Succeed()) fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build() mapFn := BootstrapConfigToInfrastructureMapFunc(fakeClient, ctrl.Log) - bootstrapConfig := kubeadmv1.KubeadmConfig{ + bootstrapConfig := 
bootstrapv1beta1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ Name: "bootstrap-test", Namespace: "default", @@ -94,15 +92,15 @@ var _ = Describe("BootstrapConfigToInfrastructureMapFunc", func() { Expect(mapFn(ctx, &bootstrapConfig)).Should(Equal([]ctrl.Request{})) By("doing nothing if the MachinePool has no BootstrapConfigRef") - machinePool := expv1.MachinePool{ + machinePool := clusterv1beta1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "machine-pool-test", Namespace: "default", }, - Spec: expv1.MachinePoolSpec{ + Spec: clusterv1beta1.MachinePoolSpec{ ClusterName: "test-cluster", - Template: clusterv1.MachineTemplateSpec{ - Spec: clusterv1.MachineSpec{ + Template: clusterv1beta1.MachineTemplateSpec{ + Spec: clusterv1beta1.MachineSpec{ ClusterName: "test-cluster", }, }, @@ -112,7 +110,7 @@ var _ = Describe("BootstrapConfigToInfrastructureMapFunc", func() { Expect(mapFn(ctx, &bootstrapConfig)).Should(Equal([]ctrl.Request{})) By("doing nothing if the MachinePool has a different BootstrapConfigRef Kind") - machinePool.Spec.Template.Spec.Bootstrap = clusterv1.Bootstrap{ + machinePool.Spec.Template.Spec.Bootstrap = clusterv1beta1.Bootstrap{ ConfigRef: &corev1.ObjectReference{ APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1", Kind: "OtherBootstrapConfig", @@ -168,7 +166,7 @@ func TestAzureClusterToAzureMachinePoolsMapper(t *testing.T) { { Name: clusterName, Kind: "Cluster", - APIVersion: clusterv1.GroupVersion.String(), + APIVersion: clusterv1beta1.GroupVersion.String(), }, }, }, diff --git a/go.mod b/go.mod index e6b414aae89..9addca40d3d 100644 --- a/go.mod +++ b/go.mod @@ -47,25 +47,24 @@ require ( golang.org/x/mod v0.29.0 golang.org/x/text v0.30.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.32.3 - k8s.io/apimachinery v0.32.3 - k8s.io/client-go v0.32.3 - k8s.io/cluster-bootstrap v0.32.3 - k8s.io/component-base v0.32.3 + k8s.io/api v0.33.3 + k8s.io/apimachinery v0.33.3 + k8s.io/client-go v0.33.3 + k8s.io/cluster-bootstrap v0.33.3 + k8s.io/component-base v0.33.3 k8s.io/klog/v2 v2.130.1 k8s.io/kubectl v0.32.3 k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e sigs.k8s.io/cloud-provider-azure v1.32.3 - sigs.k8s.io/cluster-api v1.10.7 - sigs.k8s.io/cluster-api/test v1.10.7 - sigs.k8s.io/controller-runtime v0.20.4 + sigs.k8s.io/cluster-api v1.11.2 + sigs.k8s.io/cluster-api/test v1.11.2 + sigs.k8s.io/controller-runtime v0.21.0 sigs.k8s.io/kind v0.30.0 ) require ( al.essio.dev/pkg/shellescape v1.5.1 // indirect cel.dev/expr v0.24.0 // indirect - dario.cat/mergo v1.0.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6 v6.3.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v1.2.0 // indirect @@ -79,24 +78,23 @@ require ( github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 // indirect github.com/BurntSushi/toml v1.4.0 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect - github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.4.0 // indirect - github.com/Masterminds/sprig/v3 v3.3.0 // indirect github.com/Microsoft/go-winio v0.5.0 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect github.com/adrg/xdg v0.5.3 // indirect github.com/antlr4-go/antlr/v4 v4.13.1 // indirect - github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/benbjohnson/clock v1.3.5 // indirect 
github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cenkalti/backoff/v5 v5.0.3 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cloudflare/circl v1.6.1 // indirect + github.com/containerd/errdefs v1.0.0 // indirect + github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/docker/docker v28.0.2+incompatible // indirect + github.com/docker/docker v28.3.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.4.0 // indirect github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 // indirect @@ -115,23 +113,19 @@ require ( github.com/go-viper/mapstructure/v2 v2.4.0 // indirect github.com/gobuffalo/flect v1.0.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt/v4 v4.5.2 // indirect github.com/golang-jwt/jwt/v5 v5.3.0 // indirect - github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect - github.com/google/cel-go v0.22.1 // indirect + github.com/google/cel-go v0.23.2 // indirect github.com/google/gnostic-models v0.6.9 // indirect github.com/google/go-github/v53 v53.2.0 // indirect github.com/google/go-querystring v1.1.0 // indirect - github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/gorilla/websocket v1.5.3 // indirect + github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/huandu/xstrings v1.5.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jellydator/ttlcache/v3 v3.3.0 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -143,10 +137,9 @@ require ( github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect - github.com/mitchellh/copystructure v1.2.0 // indirect - github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/spdystream v0.5.0 // indirect + github.com/moby/sys/sequential v0.6.0 // indirect github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect @@ -168,15 +161,13 @@ require ( github.com/rotisserie/eris v0.5.4 // indirect github.com/sagikazarmark/locafero v0.7.0 // indirect github.com/samber/lo v1.49.1 // indirect - github.com/shopspring/decimal v1.4.0 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/spf13/afero v1.12.0 // indirect github.com/spf13/cast v1.7.1 // indirect github.com/spf13/cobra v1.9.1 // indirect - github.com/spf13/viper v1.20.0 // indirect + github.com/spf13/viper v1.20.1 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect - github.com/valyala/fastjson v1.6.4 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect @@ -186,6 
+177,7 @@ require ( go.opentelemetry.io/proto/otlp v1.7.1 // indirect go.uber.org/automaxprocs v1.6.0 // indirect go.uber.org/multierr v1.11.0 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/net v0.45.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect @@ -201,8 +193,8 @@ require ( google.golang.org/protobuf v1.36.8 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - k8s.io/apiextensions-apiserver v0.32.3 // indirect - k8s.io/apiserver v0.32.3 // indirect + k8s.io/apiextensions-apiserver v0.33.3 // indirect + k8s.io/apiserver v0.33.3 // indirect k8s.io/cli-runtime v0.32.3 // indirect k8s.io/cloud-provider v0.32.2 // indirect k8s.io/component-helpers v0.32.3 // indirect @@ -215,5 +207,5 @@ require ( sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect sigs.k8s.io/randfill v1.0.0 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect - sigs.k8s.io/yaml v1.4.0 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect ) diff --git a/go.sum b/go.sum index 6388531553c..e74006c2715 100644 --- a/go.sum +++ b/go.sum @@ -130,8 +130,6 @@ github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYW github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= -github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/asaskevich/govalidator/v11 v11.0.2-0.20250122183457-e11347878e23 h1:I+Cy77zrFmVWIHOZaxiNV4L7w9xuVux9LMqAblGzvdE= github.com/asaskevich/govalidator/v11 v11.0.2-0.20250122183457-e11347878e23/go.mod h1:S7DsXubvw3xBC8rSI+qmzcTNw7xEND0ojHPqglh/whY= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= @@ -152,17 +150,16 @@ github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I= github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/coredns/caddy v1.1.1 h1:2eYKZT7i6yxIfGP3qLJoJ7HAsDJqYB+X68g4NYjSrE0= github.com/coredns/caddy v1.1.1/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4= github.com/coredns/corefile-migration v1.0.28 h1:O8YafUREqUcGbRtcJfOmWU6ifcw2HX76I1QvI5xZpsw= github.com/coredns/corefile-migration v1.0.28/go.mod h1:56DPqONc3njpVPsdilEnfijCwNGC3/kTJLl7i7SPavY= -github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= -github.com/coreos/go-semver v0.3.1/go.mod 
h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= -github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= @@ -174,16 +171,14 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v28.0.2+incompatible h1:9BILleFwug5FSSqWBgVevgL3ewDJfWWWyZVqlDMttE8= -github.com/docker/docker v28.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI= +github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 h1:7QPwrLT79GlD5sizHf27aoY2RTvw62mO6x7mxkScNk0= github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46/go.mod h1:esf2rsHFNlZlxsqsZDojNBcnNs5REqIvRrWRHqX0vEU= -github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= -github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8= @@ -235,8 +230,6 @@ github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw= github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= -github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA= @@ -247,8 +240,8 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod 
h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/cel-go v0.22.1 h1:AfVXx3chM2qwoSbM7Da8g8hX8OVSkBFwX+rz2+PcK40= -github.com/google/cel-go v0.22.1/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8= +github.com/google/cel-go v0.23.2 h1:UdEe3CvQh3Nv+E/j9r1Y//WO0K0cSyD7/y0bzyLIMI4= +github.com/google/cel-go v0.23.2/go.mod h1:52Pb6QsDbC5kvgxvZhiL9QX1oZEkcUF/ZqaPx1J5Wwo= github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -269,18 +262,12 @@ github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaU github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= -github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= @@ -305,8 +292,6 @@ github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jellydator/ttlcache/v3 v3.3.0 h1:BdoC9cE81qXfrxeb9eoJi9dWrdhSuwXMAnHTbnBm4Wc= github.com/jellydator/ttlcache/v3 v3.3.0/go.mod h1:bj2/e0l4jRnQdrnSTaGTsh4GSXvMjQcy41i7th0GVGw= -github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= -github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= 
github.com/joshdk/go-junit v1.0.0 h1:S86cUKIdwBHWwA6xCmFlf3RTLfVXYQfvanM5Uh+K6GE= @@ -357,6 +342,10 @@ github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3N github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= +github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= +github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= +github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= +github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -428,8 +417,6 @@ github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+D github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= -github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs= @@ -441,8 +428,8 @@ github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wx github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.20.0 h1:zrxIyR3RQIOsarIrgL8+sAvALXul9jeEPa06Y0Ph6vY= -github.com/spf13/viper v1.20.0/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= +github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= +github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -468,38 +455,14 @@ github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= -github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= -github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= -github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ= -github.com/valyala/fastjson 
v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 h1:S2dVYn90KE98chqDkyE9Z4N61UnQd+KOfgp5Iu53llk= -github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= -go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= -go.etcd.io/etcd/api/v3 v3.5.20 h1:aKfz3nPZECWoZJXMSH9y6h2adXjtOHaHTGEVCuCmaz0= -go.etcd.io/etcd/api/v3 v3.5.20/go.mod h1:QqKGViq4KTgOG43dr/uH0vmGWIaoJY3ggFi6ZH0TH/U= -go.etcd.io/etcd/client/pkg/v3 v3.5.20 h1:sZIAtra+xCo56gdf6BR62to/hiie5Bwl7hQIqMzVTEM= -go.etcd.io/etcd/client/pkg/v3 v3.5.20/go.mod h1:qaOi1k4ZA9lVLejXNvyPABrVEe7VymMF2433yyRQ7O0= -go.etcd.io/etcd/client/v2 v2.305.16 h1:kQrn9o5czVNaukf2A2At43cE9ZtWauOtf9vRZuiKXow= -go.etcd.io/etcd/client/v2 v2.305.16/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE= -go.etcd.io/etcd/client/v3 v3.5.20 h1:jMT2MwQEhyvhQg49Cec+1ZHJzfUf6ZgcmV0GjPv0tIQ= -go.etcd.io/etcd/client/v3 v3.5.20/go.mod h1:J5lbzYRMUR20YolS5UjlqqMcu3/wdEvG5VNBhzyo3m0= -go.etcd.io/etcd/pkg/v3 v3.5.16 h1:cnavs5WSPWeK4TYwPYfmcr3Joz9BH+TZ6qoUtz6/+mc= -go.etcd.io/etcd/pkg/v3 v3.5.16/go.mod h1:+lutCZHG5MBBFI/U4eYT5yL7sJfnexsoM20Y0t2uNuY= -go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk= -go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI= -go.etcd.io/etcd/server/v3 v3.5.16 h1:d0/SAdJ3vVsZvF8IFVb1k8zqMZ+heGcNfft71ul9GWE= -go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ= go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= @@ -534,6 +497,8 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod 
h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -599,8 +564,6 @@ gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0 gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= -google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk= -google.golang.org/genproto v0.0.0-20241118233622-e639e219e697/go.mod h1:JJrvXBWRZaFMxBufik1a4RpFw4HhgVtBBWQeQgUj2cc= google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY= google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE= google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE= @@ -618,8 +581,6 @@ gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSP gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= -gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -627,24 +588,24 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= -k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= -k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= -k8s.io/apiextensions-apiserver v0.32.3 h1:4D8vy+9GWerlErCwVIbcQjsWunF9SUGNu7O7hiQTyPY= -k8s.io/apiextensions-apiserver v0.32.3/go.mod h1:8YwcvVRMVzw0r1Stc7XfGAzB/SIVLunqApySV5V7Dss= -k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= -k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/apiserver v0.32.3 h1:kOw2KBuHOA+wetX1MkmrxgBr648ksz653j26ESuWNY8= -k8s.io/apiserver v0.32.3/go.mod h1:q1x9B8E/WzShF49wh3ADOh6muSfpmFL0I2t+TG0Zdgc= +k8s.io/api v0.33.3 h1:SRd5t//hhkI1buzxb288fy2xvjubstenEKL9K51KBI8= +k8s.io/api v0.33.3/go.mod h1:01Y/iLUjNBM3TAvypct7DIj0M0NIZc+PzAHCIo0CYGE= +k8s.io/apiextensions-apiserver v0.33.3 h1:qmOcAHN6DjfD0v9kxL5udB27SRP6SG/MTopmge3MwEs= +k8s.io/apiextensions-apiserver v0.33.3/go.mod h1:oROuctgo27mUsyp9+Obahos6CWcMISSAPzQ77CAQGz8= +k8s.io/apimachinery v0.33.3 h1:4ZSrmNa0c/ZpZJhAgRdcsFcZOw1PQU1bALVQ0B3I5LA= +k8s.io/apimachinery v0.33.3/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/apiserver v0.33.3 h1:Wv0hGc+QFdMJB4ZSiHrCgN3zL3QRatu56+rpccKC3J4= +k8s.io/apiserver v0.33.3/go.mod h1:05632ifFEe6TxwjdAIrwINHWE2hLwyADFk5mBsQa15E= k8s.io/cli-runtime v0.32.3 
h1:khLF2ivU2T6Q77H97atx3REY9tXiA3OLOjWJxUrdvss= k8s.io/cli-runtime v0.32.3/go.mod h1:vZT6dZq7mZAca53rwUfdFSZjdtLyfF61mkf/8q+Xjak= -k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= -k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= +k8s.io/client-go v0.33.3 h1:M5AfDnKfYmVJif92ngN532gFqakcGi6RvaOF16efrpA= +k8s.io/client-go v0.33.3/go.mod h1:luqKBQggEf3shbxHY4uVENAxrDISLOarxpTKMiUuujg= k8s.io/cloud-provider v0.32.2 h1:8EC+fCYo0r0REczSjOZcVuQPCMxXxCKlgxDbYMrzC30= k8s.io/cloud-provider v0.32.2/go.mod h1:2s8TeAXhVezp5VISaTxM6vW3yDonOZXoN4Aryz1p1PQ= -k8s.io/cluster-bootstrap v0.32.3 h1:AqIpsUhB6MUeaAsl1WvaUw54AHRd2hfZrESlKChtd8s= -k8s.io/cluster-bootstrap v0.32.3/go.mod h1:CHbBwgOb6liDV6JFUTkx5t85T2xidy0sChBDoyYw344= -k8s.io/component-base v0.32.3 h1:98WJvvMs3QZ2LYHBzvltFSeJjEx7t5+8s71P7M74u8k= -k8s.io/component-base v0.32.3/go.mod h1:LWi9cR+yPAv7cu2X9rZanTiFKB2kHA+JjmhkKjCZRpI= +k8s.io/cluster-bootstrap v0.33.3 h1:u2NTxJ5CFSBFXaDxLQoOWMly8eni31psVso+caq6uwI= +k8s.io/cluster-bootstrap v0.33.3/go.mod h1:p970f8u8jf273zyQ5raD8WUu2XyAl0SAWOY82o7i/ds= +k8s.io/component-base v0.33.3 h1:mlAuyJqyPlKZM7FyaoM/LcunZaaY353RXiOd2+B5tGA= +k8s.io/component-base v0.33.3/go.mod h1:ktBVsBzkI3imDuxYXmVxZ2zxJnYTZ4HAsVj9iF09qp4= k8s.io/component-helpers v0.32.3 h1:9veHpOGTPLluqU4hAu5IPOwkOIZiGAJUhHndfVc5FT4= k8s.io/component-helpers v0.32.3/go.mod h1:utTBXk8lhkJewBKNuNf32Xl3KT/0VV19DmiXU/SV4Ao= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= @@ -663,12 +624,12 @@ sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.5.9 h1:+ngbNuuzAIy4mIA09/ALZxx0 sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.5.9/go.mod h1:wlb5KMXferSuS9asjIlqjU7yHnCUEtAGnwjYdDtqdmk= sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.4.1 h1:F5qZPS35TGb0ghlLGcHrbwzoO3mFnCBMM4ADGAlY+rI= sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.4.1/go.mod h1:rEQnoF3pmD1kmAFQCwA/SqHiiftLFeMwdQt0gsuKWbM= -sigs.k8s.io/cluster-api v1.10.7 h1:MuzeuAhLJLTgmqTitVz1XeXZLgAd4tJfnH9xitKG63A= -sigs.k8s.io/cluster-api v1.10.7/go.mod h1:PTuQc7CgNahPlJrLNJ0q4gKdpQ4wITgeVXDiDQQv2to= -sigs.k8s.io/cluster-api/test v1.10.7 h1:RoaKevoDx2xsloAvAzmRcuImmpS7tNJcEd6k/KH2Cf4= -sigs.k8s.io/cluster-api/test v1.10.7/go.mod h1:t177O+xWeeCqbPXVlsGNn+LKkoyFo9Oh3ar0znVRwX0= -sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= -sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= +sigs.k8s.io/cluster-api v1.11.2 h1:uAczaBavU5Y6aDgyoXWtq28k1kalpSZnVItwXHusw1c= +sigs.k8s.io/cluster-api v1.11.2/go.mod h1:C1gJVAjMXRG+M+djjGYNkoi5kBMhFnOUI9QqZDAtMms= +sigs.k8s.io/cluster-api/test v1.11.2 h1:SaYS/HexHPIU2r7oDBaq/CNOD+NP84oGnRY3KD9YIN8= +sigs.k8s.io/cluster-api/test v1.11.2/go.mod h1:COviHWIKTcip0VADeIh8Rm5bjqzyZ1LuzKBW1EqjJRc= +sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= +sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/kind v0.30.0 h1:2Xi1KFEfSMm0XDcvKnUt15ZfgRPCT0OnCBbpgh8DztY= @@ -682,5 +643,6 @@ sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= sigs.k8s.io/structured-merge-diff/v4 v4.6.0 
h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/internal/test/env/env.go b/internal/test/env/env.go index d41cd8b4003..a10973334f8 100644 --- a/internal/test/env/env.go +++ b/internal/test/env/env.go @@ -35,8 +35,7 @@ import ( "k8s.io/client-go/rest" "k8s.io/klog/v2" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" "sigs.k8s.io/controller-runtime/pkg/manager" @@ -58,8 +57,7 @@ var ( func init() { // Calculate the scheme. utilruntime.Must(clientgoscheme.AddToScheme(scheme)) - utilruntime.Must(clusterv1.AddToScheme(scheme)) - utilruntime.Must(expv1.AddToScheme(scheme)) + utilruntime.Must(clusterv1beta1.AddToScheme(scheme)) utilruntime.Must(infrav1.AddToScheme(scheme)) utilruntime.Must(infrav1exp.AddToScheme(scheme)) diff --git a/main.go b/main.go index 236e3d9c2a0..c250aa6edaf 100644 --- a/main.go +++ b/main.go @@ -43,11 +43,10 @@ import ( cgrecord "k8s.io/client-go/tools/record" "k8s.io/klog/v2" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - kubeadmv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + bootstrapv1beta1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/controllers/clustercache" "sigs.k8s.io/cluster-api/controllers/remote" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" capifeature "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util/flags" "sigs.k8s.io/cluster-api/util/record" @@ -80,9 +79,8 @@ func init() { _ = clientgoscheme.AddToScheme(scheme) _ = infrav1.AddToScheme(scheme) _ = infrav1exp.AddToScheme(scheme) - _ = clusterv1.AddToScheme(scheme) - _ = expv1.AddToScheme(scheme) - _ = kubeadmv1.AddToScheme(scheme) + _ = clusterv1beta1.AddToScheme(scheme) + _ = bootstrapv1beta1.AddToScheme(scheme) _ = asoresourcesv1.AddToScheme(scheme) _ = asocontainerservicev1api20210501.AddToScheme(scheme) _ = asocontainerservicev1api20230201.AddToScheme(scheme) @@ -170,7 +168,7 @@ func InitFlags(fs *pflag.FlagSet) { &watchFilterValue, "watch-filter", "", - fmt.Sprintf("Label value that the controller watches to reconcile cluster-api objects. Label key is always %s. If unspecified, the controller watches for all cluster-api objects.", clusterv1.WatchLabel), + fmt.Sprintf("Label value that the controller watches to reconcile cluster-api objects. Label key is always %s. If unspecified, the controller watches for all cluster-api objects.", clusterv1beta1.WatchLabel), ) fs.StringVar( diff --git a/pkg/mutators/azureasomanagedcontrolplane.go b/pkg/mutators/azureasomanagedcontrolplane.go index 97130ddb073..099275c4e39 100644 --- a/pkg/mutators/azureasomanagedcontrolplane.go +++ b/pkg/mutators/azureasomanagedcontrolplane.go @@ -33,8 +33,7 @@ import ( // but verify that check is actually working. 
asocontainerservicev1hub "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20240901/storage" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - exputil "sigs.k8s.io/cluster-api/exp/util" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/secret" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/conversion" @@ -43,6 +42,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/util/tele" + clusterv1beta1util "sigs.k8s.io/cluster-api-provider-azure/util/v1beta1" ) var ( @@ -54,7 +54,7 @@ var ( ) // SetManagedClusterDefaults propagates values defined by Cluster API to an ASO ManagedCluster. -func SetManagedClusterDefaults(ctrlClient client.Client, asoManagedControlPlane *infrav1.AzureASOManagedControlPlane, cluster *clusterv1.Cluster) ResourcesMutator { +func SetManagedClusterDefaults(ctrlClient client.Client, asoManagedControlPlane *infrav1.AzureASOManagedControlPlane, cluster *clusterv1beta1.Cluster) ResourcesMutator { return func(ctx context.Context, us []*unstructured.Unstructured) error { ctx, _, done := tele.StartSpanWithLogger(ctx, "mutators.SetManagedClusterDefaults") defer done() @@ -127,7 +127,7 @@ func setManagedClusterKubernetesVersion(ctx context.Context, asoManagedControlPl return unstructured.SetNestedField(managedCluster.UnstructuredContent(), capzK8sVersion, k8sVersionPath...) } -func setManagedClusterServiceCIDR(ctx context.Context, cluster *clusterv1.Cluster, managedClusterPath string, managedCluster *unstructured.Unstructured) error { +func setManagedClusterServiceCIDR(ctx context.Context, cluster *clusterv1beta1.Cluster, managedClusterPath string, managedCluster *unstructured.Unstructured) error { _, log, done := tele.StartSpanWithLogger(ctx, "mutators.setManagedClusterServiceCIDR") defer done() @@ -160,7 +160,7 @@ func setManagedClusterServiceCIDR(ctx context.Context, cluster *clusterv1.Cluste return unstructured.SetNestedField(managedCluster.UnstructuredContent(), capiCIDR, svcCIDRPath...) } -func setManagedClusterPodCIDR(ctx context.Context, cluster *clusterv1.Cluster, managedClusterPath string, managedCluster *unstructured.Unstructured) error { +func setManagedClusterPodCIDR(ctx context.Context, cluster *clusterv1beta1.Cluster, managedClusterPath string, managedCluster *unstructured.Unstructured) error { _, log, done := tele.StartSpanWithLogger(ctx, "mutators.setManagedClusterPodCIDR") defer done() @@ -193,7 +193,7 @@ func setManagedClusterPodCIDR(ctx context.Context, cluster *clusterv1.Cluster, m return unstructured.SetNestedField(managedCluster.UnstructuredContent(), capiCIDR, podCIDRPath...) 
} -func setManagedClusterAgentPoolProfiles(ctx context.Context, ctrlClient client.Client, namespace string, cluster *clusterv1.Cluster, managedClusterPath string, managedCluster *unstructured.Unstructured) error { +func setManagedClusterAgentPoolProfiles(ctx context.Context, ctrlClient client.Client, namespace string, cluster *clusterv1beta1.Cluster, managedClusterPath string, managedCluster *unstructured.Unstructured) error { ctx, log, done := tele.StartSpanWithLogger(ctx, "mutators.setManagedClusterAgentPoolProfiles") defer done() @@ -259,7 +259,7 @@ func agentPoolsFromManagedMachinePools(ctx context.Context, ctrlClient client.Cl err := ctrlClient.List(ctx, asoManagedMachinePools, client.InNamespace(namespace), client.MatchingLabels{ - clusterv1.ClusterNameLabel: clusterName, + clusterv1beta1.ClusterNameLabel: clusterName, }, ) if err != nil { @@ -273,7 +273,7 @@ func agentPoolsFromManagedMachinePools(ctx context.Context, ctrlClient client.Cl var agentPools []conversion.Convertible for _, asoManagedMachinePool := range asoManagedMachinePools.Items { - machinePool, err := exputil.GetOwnerMachinePool(ctx, ctrlClient, asoManagedMachinePool.ObjectMeta) + machinePool, err := clusterv1beta1util.GetOwnerMachinePool(ctx, ctrlClient, asoManagedMachinePool.ObjectMeta) if err != nil { return nil, err } @@ -380,7 +380,7 @@ func setAgentPoolProfilesFromAgentPools(managedCluster conversion.Convertible, a return managedCluster.ConvertFrom(hubMC) } -func setManagedClusterCredentials(ctx context.Context, cluster *clusterv1.Cluster, managedClusterPath string, managedCluster *unstructured.Unstructured) error { +func setManagedClusterCredentials(ctx context.Context, cluster *clusterv1beta1.Cluster, managedClusterPath string, managedCluster *unstructured.Unstructured) error { _, log, done := tele.StartSpanWithLogger(ctx, "mutators.setManagedClusterCredentials") defer done() diff --git a/pkg/mutators/azureasomanagedcontrolplane_test.go b/pkg/mutators/azureasomanagedcontrolplane_test.go index fa3dd0eba2a..082ce88707c 100644 --- a/pkg/mutators/azureasomanagedcontrolplane_test.go +++ b/pkg/mutators/azureasomanagedcontrolplane_test.go @@ -32,8 +32,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/secret" "sigs.k8s.io/controller-runtime/pkg/client" fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -49,7 +48,7 @@ func TestSetManagedClusterDefaults(t *testing.T) { tests := []struct { name string asoManagedControlPlane *infrav1.AzureASOManagedControlPlane - cluster *clusterv1.Cluster + cluster *clusterv1beta1.Cluster expected []*unstructured.Unstructured expectedErr error }{ @@ -78,16 +77,16 @@ func TestSetManagedClusterDefaults(t *testing.T) { }, }, }, - cluster: &clusterv1.Cluster{ + cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", }, - Spec: clusterv1.ClusterSpec{ - ClusterNetwork: &clusterv1.ClusterNetwork{ - Pods: &clusterv1.NetworkRanges{ + Spec: clusterv1beta1.ClusterSpec{ + ClusterNetwork: &clusterv1beta1.ClusterNetwork{ + Pods: &clusterv1beta1.NetworkRanges{ CIDRBlocks: []string{"pod-0", "pod-1"}, }, - Services: &clusterv1.NetworkRanges{ + Services: &clusterv1beta1.NetworkRanges{ CIDRBlocks: []string{"svc-0", "svc-1"}, }, }, @@ -251,14 +250,14 @@ func TestSetManagedClusterServiceCIDR(t *testing.T) { tests := 
[]struct { name string - cluster *clusterv1.Cluster + cluster *clusterv1beta1.Cluster managedCluster *asocontainerservicev1.ManagedCluster expected *asocontainerservicev1.ManagedCluster expectedErr error }{ { name: "no CAPI opinion", - cluster: &clusterv1.Cluster{}, + cluster: &clusterv1beta1.Cluster{}, managedCluster: &asocontainerservicev1.ManagedCluster{ Spec: asocontainerservicev1.ManagedCluster_Spec{ NetworkProfile: &asocontainerservicev1.ContainerServiceNetworkProfile{ @@ -276,10 +275,10 @@ func TestSetManagedClusterServiceCIDR(t *testing.T) { }, { name: "set from CAPI opinion", - cluster: &clusterv1.Cluster{ - Spec: clusterv1.ClusterSpec{ - ClusterNetwork: &clusterv1.ClusterNetwork{ - Services: &clusterv1.NetworkRanges{ + cluster: &clusterv1beta1.Cluster{ + Spec: clusterv1beta1.ClusterSpec{ + ClusterNetwork: &clusterv1beta1.ClusterNetwork{ + Services: &clusterv1beta1.NetworkRanges{ CIDRBlocks: []string{"capi cidr"}, }, }, @@ -296,10 +295,10 @@ func TestSetManagedClusterServiceCIDR(t *testing.T) { }, { name: "user value matching CAPI ok", - cluster: &clusterv1.Cluster{ - Spec: clusterv1.ClusterSpec{ - ClusterNetwork: &clusterv1.ClusterNetwork{ - Services: &clusterv1.NetworkRanges{ + cluster: &clusterv1beta1.Cluster{ + Spec: clusterv1beta1.ClusterSpec{ + ClusterNetwork: &clusterv1beta1.ClusterNetwork{ + Services: &clusterv1beta1.NetworkRanges{ CIDRBlocks: []string{"capi cidr"}, }, }, @@ -322,14 +321,14 @@ func TestSetManagedClusterServiceCIDR(t *testing.T) { }, { name: "incompatible", - cluster: &clusterv1.Cluster{ + cluster: &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "name", Namespace: "ns", }, - Spec: clusterv1.ClusterSpec{ - ClusterNetwork: &clusterv1.ClusterNetwork{ - Services: &clusterv1.NetworkRanges{ + Spec: clusterv1beta1.ClusterSpec{ + ClusterNetwork: &clusterv1beta1.ClusterNetwork{ + Services: &clusterv1beta1.NetworkRanges{ CIDRBlocks: []string{"capi cidr"}, }, }, @@ -381,14 +380,14 @@ func TestSetManagedClusterPodCIDR(t *testing.T) { tests := []struct { name string - cluster *clusterv1.Cluster + cluster *clusterv1beta1.Cluster managedCluster *asocontainerservicev1.ManagedCluster expected *asocontainerservicev1.ManagedCluster expectedErr error }{ { name: "no CAPI opinion", - cluster: &clusterv1.Cluster{}, + cluster: &clusterv1beta1.Cluster{}, managedCluster: &asocontainerservicev1.ManagedCluster{ Spec: asocontainerservicev1.ManagedCluster_Spec{ NetworkProfile: &asocontainerservicev1.ContainerServiceNetworkProfile{ @@ -406,10 +405,10 @@ func TestSetManagedClusterPodCIDR(t *testing.T) { }, { name: "set from CAPI opinion", - cluster: &clusterv1.Cluster{ - Spec: clusterv1.ClusterSpec{ - ClusterNetwork: &clusterv1.ClusterNetwork{ - Pods: &clusterv1.NetworkRanges{ + cluster: &clusterv1beta1.Cluster{ + Spec: clusterv1beta1.ClusterSpec{ + ClusterNetwork: &clusterv1beta1.ClusterNetwork{ + Pods: &clusterv1beta1.NetworkRanges{ CIDRBlocks: []string{"capi cidr"}, }, }, @@ -426,10 +425,10 @@ func TestSetManagedClusterPodCIDR(t *testing.T) { }, { name: "user value matching CAPI ok", - cluster: &clusterv1.Cluster{ - Spec: clusterv1.ClusterSpec{ - ClusterNetwork: &clusterv1.ClusterNetwork{ - Pods: &clusterv1.NetworkRanges{ + cluster: &clusterv1beta1.Cluster{ + Spec: clusterv1beta1.ClusterSpec{ + ClusterNetwork: &clusterv1beta1.ClusterNetwork{ + Pods: &clusterv1beta1.NetworkRanges{ CIDRBlocks: []string{"capi cidr"}, }, }, @@ -452,14 +451,14 @@ func TestSetManagedClusterPodCIDR(t *testing.T) { }, { name: "incompatible", - cluster: &clusterv1.Cluster{ + cluster: 
&clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "name", Namespace: "ns", }, - Spec: clusterv1.ClusterSpec{ - ClusterNetwork: &clusterv1.ClusterNetwork{ - Pods: &clusterv1.NetworkRanges{ + Spec: clusterv1beta1.ClusterSpec{ + ClusterNetwork: &clusterv1beta1.ClusterNetwork{ + Pods: &clusterv1beta1.NetworkRanges{ CIDRBlocks: []string{"capi cidr"}, }, }, @@ -512,7 +511,7 @@ func TestSetManagedClusterAgentPoolProfiles(t *testing.T) { s := runtime.NewScheme() g.Expect(asocontainerservicev1.AddToScheme(s)).To(Succeed()) g.Expect(infrav1.AddToScheme(s)).To(Succeed()) - g.Expect(expv1.AddToScheme(s)).To(Succeed()) + g.Expect(clusterv1beta1.AddToScheme(s)).To(Succeed()) fakeClientBuilder := func() *fakeclient.ClientBuilder { return fakeclient.NewClientBuilder().WithScheme(s) } @@ -575,11 +574,11 @@ func TestSetManagedClusterAgentPoolProfiles(t *testing.T) { Name: "wrong-label", Namespace: namespace, Labels: map[string]string{ - clusterv1.ClusterNameLabel: "not-" + clusterName, + clusterv1beta1.ClusterNameLabel: "not-" + clusterName, }, OwnerReferences: []metav1.OwnerReference{ { - APIVersion: expv1.GroupVersion.Identifier(), + APIVersion: clusterv1beta1.GroupVersion.Identifier(), Kind: "MachinePool", Name: "wrong-label", }, @@ -604,11 +603,11 @@ func TestSetManagedClusterAgentPoolProfiles(t *testing.T) { Name: "wrong-namespace", Namespace: "not-" + namespace, Labels: map[string]string{ - clusterv1.ClusterNameLabel: clusterName, + clusterv1beta1.ClusterNameLabel: clusterName, }, OwnerReferences: []metav1.OwnerReference{ { - APIVersion: expv1.GroupVersion.Identifier(), + APIVersion: clusterv1beta1.GroupVersion.Identifier(), Kind: "MachinePool", Name: "wrong-namespace", }, @@ -633,11 +632,11 @@ func TestSetManagedClusterAgentPoolProfiles(t *testing.T) { Name: "pool0", Namespace: namespace, Labels: map[string]string{ - clusterv1.ClusterNameLabel: clusterName, + clusterv1beta1.ClusterNameLabel: clusterName, }, OwnerReferences: []metav1.OwnerReference{ { - APIVersion: expv1.GroupVersion.Identifier(), + APIVersion: clusterv1beta1.GroupVersion.Identifier(), Kind: "MachinePool", Name: "pool0", }, @@ -662,11 +661,11 @@ func TestSetManagedClusterAgentPoolProfiles(t *testing.T) { Name: "pool1", Namespace: namespace, Labels: map[string]string{ - clusterv1.ClusterNameLabel: clusterName, + clusterv1beta1.ClusterNameLabel: clusterName, }, OwnerReferences: []metav1.OwnerReference{ { - APIVersion: expv1.GroupVersion.Identifier(), + APIVersion: clusterv1beta1.GroupVersion.Identifier(), Kind: "MachinePool", Name: "pool1", }, @@ -688,8 +687,8 @@ func TestSetManagedClusterAgentPoolProfiles(t *testing.T) { }, }, } - machinePools := &expv1.MachinePoolList{ - Items: []expv1.MachinePool{ + machinePools := &clusterv1beta1.MachinePoolList{ + Items: []clusterv1beta1.MachinePool{ { ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, @@ -707,7 +706,7 @@ func TestSetManagedClusterAgentPoolProfiles(t *testing.T) { Namespace: namespace, Name: "pool0", }, - Spec: expv1.MachinePoolSpec{ + Spec: clusterv1beta1.MachinePoolSpec{ Replicas: ptr.To[int32](1), }, }, @@ -716,7 +715,7 @@ func TestSetManagedClusterAgentPoolProfiles(t *testing.T) { Namespace: namespace, Name: "pool1", }, - Spec: expv1.MachinePoolSpec{ + Spec: clusterv1beta1.MachinePoolSpec{ Replicas: ptr.To[int32](2), }, }, @@ -737,7 +736,7 @@ func TestSetManagedClusterAgentPoolProfiles(t *testing.T) { Build(), } - cluster := &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: clusterName}} + cluster := &clusterv1beta1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: 
clusterName}} err := setManagedClusterAgentPoolProfiles(ctx, c, namespace, cluster, "", umc) g.Expect(err).NotTo(HaveOccurred()) g.Expect(s.Convert(umc, managedCluster, nil)).To(Succeed()) diff --git a/pkg/mutators/azureasomanagedmachinepool.go b/pkg/mutators/azureasomanagedmachinepool.go index 0c92d23c55b..f62b62fb508 100644 --- a/pkg/mutators/azureasomanagedmachinepool.go +++ b/pkg/mutators/azureasomanagedmachinepool.go @@ -23,8 +23,7 @@ import ( asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -36,7 +35,7 @@ import ( var ErrNoManagedClustersAgentPoolDefined = fmt.Errorf("no %s ManagedClustersAgentPools defined in AzureASOManagedMachinePool spec.resources", asocontainerservicev1.GroupVersion.Group) // SetAgentPoolDefaults propagates config from a MachinePool to an AzureASOManagedMachinePool's defined ManagedClustersAgentPool. -func SetAgentPoolDefaults(ctrlClient client.Client, machinePool *expv1.MachinePool) ResourcesMutator { +func SetAgentPoolDefaults(ctrlClient client.Client, machinePool *clusterv1beta1.MachinePool) ResourcesMutator { return func(ctx context.Context, us []*unstructured.Unstructured) error { ctx, _, done := tele.StartSpanWithLogger(ctx, "mutators.SetAgentPoolDefaults") defer done() @@ -71,7 +70,7 @@ func SetAgentPoolDefaults(ctrlClient client.Client, machinePool *expv1.MachinePo } } -func setAgentPoolOrchestratorVersion(ctx context.Context, machinePool *expv1.MachinePool, agentPoolPath string, agentPool *unstructured.Unstructured) error { +func setAgentPoolOrchestratorVersion(ctx context.Context, machinePool *clusterv1beta1.MachinePool, agentPoolPath string, agentPool *unstructured.Unstructured) error { _, log, done := tele.StartSpanWithLogger(ctx, "mutators.setAgentPoolOrchestratorVersion") defer done() @@ -100,7 +99,7 @@ func setAgentPoolOrchestratorVersion(ctx context.Context, machinePool *expv1.Mac return unstructured.SetNestedField(agentPool.UnstructuredContent(), capiK8sVersion, k8sVersionPath...) } -func reconcileAutoscaling(agentPool *unstructured.Unstructured, machinePool *expv1.MachinePool) error { +func reconcileAutoscaling(agentPool *unstructured.Unstructured, machinePool *clusterv1beta1.MachinePool) error { autoscaling, _, err := unstructured.NestedBool(agentPool.UnstructuredContent(), "spec", "enableAutoScaling") if err != nil { return err @@ -109,26 +108,26 @@ func reconcileAutoscaling(agentPool *unstructured.Unstructured, machinePool *exp // Update the MachinePool replica manager annotation. This isn't wrapped in a mutation object because // it's not modifying an ASO resource and users are not expected to set this manually. This behavior // is documented by CAPI as expected of a provider. 
- replicaManager, ok := machinePool.Annotations[clusterv1.ReplicasManagedByAnnotation] + replicaManager, ok := machinePool.Annotations[clusterv1beta1.ReplicasManagedByAnnotation] if autoscaling { if !ok { if machinePool.Annotations == nil { machinePool.Annotations = make(map[string]string) } - machinePool.Annotations[clusterv1.ReplicasManagedByAnnotation] = infrav1.ReplicasManagedByAKS + machinePool.Annotations[clusterv1beta1.ReplicasManagedByAnnotation] = infrav1.ReplicasManagedByAKS } else if replicaManager != infrav1.ReplicasManagedByAKS { - return fmt.Errorf("failed to enable autoscaling, replicas are already being managed by %s according to MachinePool %s's %s annotation", replicaManager, machinePool.Name, clusterv1.ReplicasManagedByAnnotation) + return fmt.Errorf("failed to enable autoscaling, replicas are already being managed by %s according to MachinePool %s's %s annotation", replicaManager, machinePool.Name, clusterv1beta1.ReplicasManagedByAnnotation) } } else if !autoscaling && replicaManager == infrav1.ReplicasManagedByAKS { // Removing this annotation informs the MachinePool controller that this MachinePool is no longer // being autoscaled. - delete(machinePool.Annotations, clusterv1.ReplicasManagedByAnnotation) + delete(machinePool.Annotations, clusterv1beta1.ReplicasManagedByAnnotation) } return nil } -func setAgentPoolCount(ctx context.Context, ctrlClient client.Client, machinePool *expv1.MachinePool, agentPoolPath string, agentPool *unstructured.Unstructured) error { +func setAgentPoolCount(ctx context.Context, ctrlClient client.Client, machinePool *clusterv1beta1.MachinePool, agentPoolPath string, agentPool *unstructured.Unstructured) error { _, log, done := tele.StartSpanWithLogger(ctx, "mutators.setAgentPoolCount") defer done() @@ -139,7 +138,7 @@ func setAgentPoolCount(ctx context.Context, ctrlClient client.Client, machinePoo // When managed by any autoscaler, CAPZ should not provide any spec.count to the ManagedClustersAgentPool // to prevent ASO from overwriting the autoscaler's opinion of the replica count. // The MachinePool's spec.replicas is used to seed an initial value as required by AKS. 
- if _, autoscaling := machinePool.Annotations[clusterv1.ReplicasManagedByAnnotation]; autoscaling { + if _, autoscaling := machinePool.Annotations[clusterv1beta1.ReplicasManagedByAnnotation]; autoscaling { existingAgentPool := &asocontainerservicev1.ManagedClustersAgentPool{} err := ctrlClient.Get(ctx, client.ObjectKey{Namespace: machinePool.GetNamespace(), Name: agentPool.GetName()}, existingAgentPool) if client.IgnoreNotFound(err) != nil { diff --git a/pkg/mutators/azureasomanagedmachinepool_test.go b/pkg/mutators/azureasomanagedmachinepool_test.go index 96a4e3c874c..445a6826108 100644 --- a/pkg/mutators/azureasomanagedmachinepool_test.go +++ b/pkg/mutators/azureasomanagedmachinepool_test.go @@ -27,8 +27,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -42,7 +41,7 @@ func TestSetAgentPoolDefaults(t *testing.T) { tests := []struct { name string asoManagedMachinePool *infrav1.AzureASOManagedMachinePool - machinePool *expv1.MachinePool + machinePool *clusterv1beta1.MachinePool expected []*unstructured.Unstructured expectedErr error }{ @@ -70,11 +69,11 @@ func TestSetAgentPoolDefaults(t *testing.T) { }, }, }, - machinePool: &expv1.MachinePool{ - Spec: expv1.MachinePoolSpec{ + machinePool: &clusterv1beta1.MachinePool{ + Spec: clusterv1beta1.MachinePoolSpec{ Replicas: ptr.To[int32](1), - Template: clusterv1.MachineTemplateSpec{ - Spec: clusterv1.MachineSpec{ + Template: clusterv1beta1.MachineTemplateSpec{ + Spec: clusterv1beta1.MachineSpec{ Version: ptr.To("vcapi k8s version"), }, }, @@ -112,17 +111,17 @@ func TestSetAgentPoolOrchestratorVersion(t *testing.T) { tests := []struct { name string - machinePool *expv1.MachinePool + machinePool *clusterv1beta1.MachinePool agentPool *asocontainerservicev1.ManagedClustersAgentPool expected *asocontainerservicev1.ManagedClustersAgentPool expectedErr error }{ { name: "no CAPI opinion", - machinePool: &expv1.MachinePool{ - Spec: expv1.MachinePoolSpec{ - Template: clusterv1.MachineTemplateSpec{ - Spec: clusterv1.MachineSpec{ + machinePool: &clusterv1beta1.MachinePool{ + Spec: clusterv1beta1.MachinePoolSpec{ + Template: clusterv1beta1.MachineTemplateSpec{ + Spec: clusterv1beta1.MachineSpec{ Version: nil, }, }, @@ -141,10 +140,10 @@ func TestSetAgentPoolOrchestratorVersion(t *testing.T) { }, { name: "set from CAPI opinion", - machinePool: &expv1.MachinePool{ - Spec: expv1.MachinePoolSpec{ - Template: clusterv1.MachineTemplateSpec{ - Spec: clusterv1.MachineSpec{ + machinePool: &clusterv1beta1.MachinePool{ + Spec: clusterv1beta1.MachinePoolSpec{ + Template: clusterv1beta1.MachineTemplateSpec{ + Spec: clusterv1beta1.MachineSpec{ Version: ptr.To("vcapi k8s version"), }, }, @@ -163,10 +162,10 @@ func TestSetAgentPoolOrchestratorVersion(t *testing.T) { }, { name: "user value matching CAPI ok", - machinePool: &expv1.MachinePool{ - Spec: expv1.MachinePoolSpec{ - Template: clusterv1.MachineTemplateSpec{ - Spec: clusterv1.MachineSpec{ + machinePool: &clusterv1beta1.MachinePool{ + Spec: clusterv1beta1.MachinePoolSpec{ + Template: clusterv1beta1.MachineTemplateSpec{ + Spec: clusterv1beta1.MachineSpec{ Version: ptr.To("vcapi k8s version"), }, }, @@ -185,13 +184,13 @@ func TestSetAgentPoolOrchestratorVersion(t *testing.T) { }, { name: 
"incompatible", - machinePool: &expv1.MachinePool{ + machinePool: &clusterv1beta1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "mp", }, - Spec: expv1.MachinePoolSpec{ - Template: clusterv1.MachineTemplateSpec{ - Spec: clusterv1.MachineSpec{ + Spec: clusterv1beta1.MachinePoolSpec{ + Template: clusterv1beta1.MachineTemplateSpec{ + Spec: clusterv1beta1.MachineSpec{ Version: ptr.To("vcapi k8s version"), }, }, @@ -240,21 +239,21 @@ func TestReconcileAutoscaling(t *testing.T) { tests := []struct { name string autoscaling bool - machinePool *expv1.MachinePool - expected *expv1.MachinePool + machinePool *clusterv1beta1.MachinePool + expected *clusterv1beta1.MachinePool expectedErr error }{ { name: "autoscaling disabled removes aks annotation", autoscaling: false, - machinePool: &expv1.MachinePool{ + machinePool: &clusterv1beta1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - clusterv1.ReplicasManagedByAnnotation: infrav1.ReplicasManagedByAKS, + clusterv1beta1.ReplicasManagedByAnnotation: infrav1.ReplicasManagedByAKS, }, }, }, - expected: &expv1.MachinePool{ + expected: &clusterv1beta1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{}, }, @@ -263,17 +262,17 @@ func TestReconcileAutoscaling(t *testing.T) { { name: "autoscaling disabled leaves other annotation", autoscaling: false, - machinePool: &expv1.MachinePool{ + machinePool: &clusterv1beta1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - clusterv1.ReplicasManagedByAnnotation: "not-" + infrav1.ReplicasManagedByAKS, + clusterv1beta1.ReplicasManagedByAnnotation: "not-" + infrav1.ReplicasManagedByAKS, }, }, }, - expected: &expv1.MachinePool{ + expected: &clusterv1beta1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - clusterv1.ReplicasManagedByAnnotation: "not-" + infrav1.ReplicasManagedByAKS, + clusterv1beta1.ReplicasManagedByAnnotation: "not-" + infrav1.ReplicasManagedByAKS, }, }, }, @@ -281,15 +280,15 @@ func TestReconcileAutoscaling(t *testing.T) { { name: "autoscaling enabled, manager undefined adds annotation", autoscaling: true, - machinePool: &expv1.MachinePool{ + machinePool: &clusterv1beta1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{}, }, }, - expected: &expv1.MachinePool{ + expected: &clusterv1beta1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - clusterv1.ReplicasManagedByAnnotation: infrav1.ReplicasManagedByAKS, + clusterv1beta1.ReplicasManagedByAnnotation: infrav1.ReplicasManagedByAKS, }, }, }, @@ -297,17 +296,17 @@ func TestReconcileAutoscaling(t *testing.T) { { name: "autoscaling enabled, manager already set", autoscaling: true, - machinePool: &expv1.MachinePool{ + machinePool: &clusterv1beta1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - clusterv1.ReplicasManagedByAnnotation: infrav1.ReplicasManagedByAKS, + clusterv1beta1.ReplicasManagedByAnnotation: infrav1.ReplicasManagedByAKS, }, }, }, - expected: &expv1.MachinePool{ + expected: &clusterv1beta1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - clusterv1.ReplicasManagedByAnnotation: infrav1.ReplicasManagedByAKS, + clusterv1beta1.ReplicasManagedByAnnotation: infrav1.ReplicasManagedByAKS, }, }, }, @@ -315,11 +314,11 @@ func TestReconcileAutoscaling(t *testing.T) { { name: "autoscaling enabled, manager set to something else", autoscaling: true, - machinePool: &expv1.MachinePool{ + machinePool: &clusterv1beta1.MachinePool{ ObjectMeta: 
metav1.ObjectMeta{ Name: "mp", Annotations: map[string]string{ - clusterv1.ReplicasManagedByAnnotation: "not-" + infrav1.ReplicasManagedByAKS, + clusterv1beta1.ReplicasManagedByAnnotation: "not-" + infrav1.ReplicasManagedByAKS, }, }, }, @@ -354,7 +353,7 @@ func TestSetAgentPoolCount(t *testing.T) { tests := []struct { name string - machinePool *expv1.MachinePool + machinePool *clusterv1beta1.MachinePool agentPool *asocontainerservicev1.ManagedClustersAgentPool existingAgentPool *asocontainerservicev1.ManagedClustersAgentPool expected *asocontainerservicev1.ManagedClustersAgentPool @@ -362,8 +361,8 @@ func TestSetAgentPoolCount(t *testing.T) { }{ { name: "no CAPI opinion", - machinePool: &expv1.MachinePool{ - Spec: expv1.MachinePoolSpec{ + machinePool: &clusterv1beta1.MachinePool{ + Spec: clusterv1beta1.MachinePoolSpec{ Replicas: nil, }, }, @@ -380,13 +379,13 @@ func TestSetAgentPoolCount(t *testing.T) { }, { name: "autoscaling enabled", - machinePool: &expv1.MachinePool{ + machinePool: &clusterv1beta1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - clusterv1.ReplicasManagedByAnnotation: infrav1.ReplicasManagedByAKS, + clusterv1beta1.ReplicasManagedByAnnotation: infrav1.ReplicasManagedByAKS, }, }, - Spec: expv1.MachinePoolSpec{ + Spec: clusterv1beta1.MachinePoolSpec{ Replicas: ptr.To[int32](3), }, }, @@ -408,8 +407,8 @@ func TestSetAgentPoolCount(t *testing.T) { }, { name: "set from CAPI opinion", - machinePool: &expv1.MachinePool{ - Spec: expv1.MachinePoolSpec{ + machinePool: &clusterv1beta1.MachinePool{ + Spec: clusterv1beta1.MachinePoolSpec{ Replicas: ptr.To[int32](1), }, }, @@ -426,8 +425,8 @@ func TestSetAgentPoolCount(t *testing.T) { }, { name: "user value matching CAPI ok", - machinePool: &expv1.MachinePool{ - Spec: expv1.MachinePoolSpec{ + machinePool: &clusterv1beta1.MachinePool{ + Spec: clusterv1beta1.MachinePoolSpec{ Replicas: ptr.To[int32](1), }, }, @@ -444,11 +443,11 @@ func TestSetAgentPoolCount(t *testing.T) { }, { name: "incompatible", - machinePool: &expv1.MachinePool{ + machinePool: &clusterv1beta1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "mp", }, - Spec: expv1.MachinePoolSpec{ + Spec: clusterv1beta1.MachinePoolSpec{ Replicas: ptr.To[int32](1), }, }, diff --git a/test/e2e/aks.go b/test/e2e/aks.go index d476d80bce2..a2263b675a3 100644 --- a/test/e2e/aks.go +++ b/test/e2e/aks.go @@ -28,8 +28,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" "sigs.k8s.io/controller-runtime/pkg/client" @@ -133,7 +132,7 @@ const ( ) // value returns the integer equivalent of controlPlaneReplicas -func (r controlPlaneReplicas) value(mp *expv1.MachinePool) int { +func (r controlPlaneReplicas) value(mp *clusterv1.MachinePool) int { switch r { case atLeastOne: return 1 @@ -202,7 +201,7 @@ func WaitForAKSSystemNodePoolMachinesToExist(ctx context.Context, input WaitForC continue } - ownerMachinePool := &expv1.MachinePool{} + ownerMachinePool := &clusterv1.MachinePool{} if err := input.Getter.Get(ctx, types.NamespacedName{Namespace: input.Namespace, Name: ref.Name}, ownerMachinePool); err != nil { LogWarningf("Failed to get machinePool: %+v", err) diff --git a/test/e2e/aks_adopt.go b/test/e2e/aks_adopt.go index 29382fd49ac..87ad55447ec 100644 --- 
a/test/e2e/aks_adopt.go
+++ b/test/e2e/aks_adopt.go
@@ -25,9 +25,8 @@ import (
 	. "github.com/onsi/gomega"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
-	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -38,7 +37,7 @@ type AKSAdoptSpecInput struct {
 	ApplyInput   clusterctl.ApplyClusterTemplateAndWaitInput
 	ApplyResult  *clusterctl.ApplyClusterTemplateAndWaitResult
 	Cluster      *clusterv1.Cluster
-	MachinePools []*expv1.MachinePool
+	MachinePools []*clusterv1.MachinePool
 }
 
 // AKSAdoptSpec tests adopting an existing AKS cluster into management by CAPZ. It first relies on a CAPZ AKS
diff --git a/test/e2e/aks_autoscaler.go b/test/e2e/aks_autoscaler.go
index 32de0dcbabb..8ba7ff89d4b 100644
--- a/test/e2e/aks_autoscaler.go
+++ b/test/e2e/aks_autoscaler.go
@@ -28,8 +28,7 @@ import (
 	. "github.com/onsi/gomega"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/utils/ptr"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
@@ -37,7 +36,7 @@ import (
 
 type AKSAutoscaleSpecInput struct {
 	Cluster       *clusterv1.Cluster
-	MachinePool   *expv1.MachinePool
+	MachinePool   *clusterv1.MachinePool
 	WaitIntervals []interface{}
 }
diff --git a/test/e2e/aks_azure_cluster_autoscaler.go b/test/e2e/aks_azure_cluster_autoscaler.go
index 7073140b39d..2a8d22dc55c 100644
--- a/test/e2e/aks_azure_cluster_autoscaler.go
+++ b/test/e2e/aks_azure_cluster_autoscaler.go
@@ -28,9 +28,9 @@ import (
 	. "github.com/onsi/gomega"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/utils/ptr"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 )
 
 type AKSAzureClusterAutoscalerSettingsSpecInput struct {
diff --git a/test/e2e/aks_byo_node.go b/test/e2e/aks_byo_node.go
index 70ea34ed45b..21c2322778e 100644
--- a/test/e2e/aks_byo_node.go
+++ b/test/e2e/aks_byo_node.go
@@ -28,10 +28,9 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/utils/ptr"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
-	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
-	"sigs.k8s.io/cluster-api/util/conditions"
+	bootstrapv1beta1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
@@ -39,7 +38,7 @@ import (
 )
 
 type AKSBYONodeSpecInput struct {
-	Cluster             *clusterv1.Cluster
+	Cluster             *clusterv1beta1.Cluster
 	KubernetesVersion   string
 	WaitIntervals       []interface{}
 	ExpectedWorkerNodes int32
@@ -71,16 +70,16 @@ func AKSBYONodeSpec(ctx context.Context, inputGetter func() AKSBYONodeSpecInput)
 	err = mgmtClient.Create(ctx, infraMachinePool)
 	Expect(err).NotTo(HaveOccurred())
 
-	kubeadmConfig := &bootstrapv1.KubeadmConfig{
+	kubeadmConfig := &bootstrapv1beta1.KubeadmConfig{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: infraMachinePool.Namespace,
 			Name:      infraMachinePool.Name,
 		},
-		Spec: bootstrapv1.KubeadmConfigSpec{
-			Files: []bootstrapv1.File{
+		Spec: bootstrapv1beta1.KubeadmConfigSpec{
+			Files: []bootstrapv1beta1.File{
 				{
-					ContentFrom: &bootstrapv1.FileSource{
-						Secret: bootstrapv1.SecretFileSource{
+					ContentFrom: &bootstrapv1beta1.FileSource{
+						Secret: bootstrapv1beta1.SecretFileSource{
 							Name: infraMachinePool.Name + "-azure-json",
 							Key:  "worker-node-azure.json",
 						},
@@ -90,8 +89,8 @@ func AKSBYONodeSpec(ctx context.Context, inputGetter func() AKSBYONodeSpecInput)
 					Owner: "root:root",
 				},
 				{
-					ContentFrom: &bootstrapv1.FileSource{
-						Secret: bootstrapv1.SecretFileSource{
+					ContentFrom: &bootstrapv1beta1.FileSource{
+						Secret: bootstrapv1beta1.SecretFileSource{
 							Name: input.Cluster.Name + "-kubeconfig",
 							Key:  "value",
 						},
@@ -101,13 +100,13 @@ func AKSBYONodeSpec(ctx context.Context, inputGetter func() AKSBYONodeSpecInput)
 					Owner: "root:root",
 				},
 			},
-			JoinConfiguration: &bootstrapv1.JoinConfiguration{
-				Discovery: bootstrapv1.Discovery{
-					File: &bootstrapv1.FileDiscovery{
+			JoinConfiguration: &bootstrapv1beta1.JoinConfiguration{
+				Discovery: bootstrapv1beta1.Discovery{
+					File: &bootstrapv1beta1.FileDiscovery{
 						KubeConfigPath: "/etc/kubernetes/admin.conf",
 					},
 				},
-				NodeRegistration: bootstrapv1.NodeRegistrationOptions{
+				NodeRegistration: bootstrapv1beta1.NodeRegistrationOptions{
 					Name: "{{ ds.meta_data[\"local_hostname\"] }}",
 					KubeletExtraArgs: map[string]string{
 						"cloud-provider": "external",
@@ -120,19 +119,19 @@ func AKSBYONodeSpec(ctx context.Context, inputGetter func() AKSBYONodeSpecInput)
 	err = mgmtClient.Create(ctx, kubeadmConfig)
 	Expect(err).NotTo(HaveOccurred())
 
-	machinePool := &expv1.MachinePool{
+	machinePool := &clusterv1beta1.MachinePool{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: infraMachinePool.Namespace,
 			Name:      infraMachinePool.Name,
 		},
-		Spec: expv1.MachinePoolSpec{
+		Spec: clusterv1beta1.MachinePoolSpec{
 			ClusterName: input.Cluster.Name,
 			Replicas:    ptr.To[int32](2),
-			Template: clusterv1.MachineTemplateSpec{
-				Spec: clusterv1.MachineSpec{
-					Bootstrap: clusterv1.Bootstrap{
+			Template: clusterv1beta1.MachineTemplateSpec{
+				Spec: clusterv1beta1.MachineSpec{
+					Bootstrap: clusterv1beta1.Bootstrap{
 						ConfigRef: &corev1.ObjectReference{
-							APIVersion: bootstrapv1.GroupVersion.String(),
+							APIVersion: bootstrapv1beta1.GroupVersion.String(),
 							Kind:       "KubeadmConfig",
 							Name:       kubeadmConfig.Name,
 						},
@@ -162,7 +161,7 @@ func AKSBYONodeSpec(ctx context.Context, inputGetter func() AKSBYONodeSpecInput)
 		pool := &infrav1exp.AzureMachinePool{}
 		err := mgmtClient.Get(ctx, client.ObjectKeyFromObject(infraMachinePool), pool)
 		g.Expect(err).NotTo(HaveOccurred())
-		g.Expect(conditions.IsTrue(pool, infrav1.BootstrapSucceededCondition)).To(BeTrue())
+		g.Expect(v1beta1conditions.IsTrue(pool, infrav1.BootstrapSucceededCondition)).To(BeTrue())
 	}, input.WaitIntervals...).Should(Succeed())
 
 	By("Adding the expected AKS labels to the nodes")
@@ -182,9 +181,9 @@ func AKSBYONodeSpec(ctx context.Context, inputGetter func() AKSBYONodeSpecInput)
 
 	By("Verifying the MachinePool becomes ready")
 	Eventually(func(g Gomega) {
-		pool := &expv1.MachinePool{}
+		pool := &clusterv1beta1.MachinePool{}
 		err := mgmtClient.Get(ctx, client.ObjectKeyFromObject(machinePool), pool)
 		g.Expect(err).NotTo(HaveOccurred())
-		g.Expect(conditions.IsTrue(pool, clusterv1.ReadyCondition)).To(BeTrue())
+		g.Expect(v1beta1conditions.IsTrue(pool, clusterv1beta1.ReadyCondition)).To(BeTrue())
 	}, input.WaitIntervals...).Should(Succeed())
 }
diff --git a/test/e2e/aks_clusterclass.go b/test/e2e/aks_clusterclass.go
index 3bfc745c157..25e04e932a3 100644
--- a/test/e2e/aks_clusterclass.go
+++ b/test/e2e/aks_clusterclass.go
@@ -28,16 +28,15 @@ import (
 	. "github.com/onsi/gomega"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/utils/ptr"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
 )
 
 type AKSClusterClassInput struct {
-	Cluster                    *clusterv1.Cluster
-	MachinePool                *expv1.MachinePool
+	Cluster                    *clusterv1beta1.Cluster
+	MachinePool                *clusterv1beta1.MachinePool
 	WaitIntervals              []interface{}
 	WaitUpgradeIntervals       []interface{}
 	KubernetesVersionUpgradeTo string
@@ -68,7 +67,7 @@ func AKSClusterClassSpec(ctx context.Context, inputGetter func() AKSClusterClass
 	By("Editing the AzureManagedMachinePoolTemplate to change the scale down mode")
 	ammpt := &infrav1.AzureManagedMachinePoolTemplate{}
-	clusterClass := &clusterv1.ClusterClass{}
+	clusterClass := &clusterv1beta1.ClusterClass{}
 	err = mgmtClient.Get(ctx, types.NamespacedName{
 		Namespace: input.Cluster.Namespace,
 		Name:      "default",
diff --git a/test/e2e/aks_fleets_member.go b/test/e2e/aks_fleets_member.go
index 09bb20f544e..ed83551588e 100644
--- a/test/e2e/aks_fleets_member.go
+++ b/test/e2e/aks_fleets_member.go
@@ -31,8 +31,8 @@ import (
 	. "github.com/onsi/gomega"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/utils/ptr"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	"sigs.k8s.io/cluster-api/util/conditions"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
@@ -109,7 +109,7 @@ func AKSFleetsMemberSpec(ctx context.Context, inputGetter func() AKSFleetsMember
 			},
 		}
 		g.Expect(mgmtClient.Update(ctx, infraControlPlane)).To(Succeed())
-		g.Expect(conditions.IsTrue(infraControlPlane, infrav1.FleetReadyCondition)).To(BeTrue())
+		g.Expect(v1beta1conditions.IsTrue(infraControlPlane, infrav1.FleetReadyCondition)).To(BeTrue())
 	}, input.WaitIntervals...).Should(Succeed())
 
 	By("Ensuring the fleet member is created and attached to the managed cluster")
diff --git a/test/e2e/aks_machinepools.go b/test/e2e/aks_machinepools.go
index ad627848a6e..cca410920f2 100644
--- a/test/e2e/aks_machinepools.go
+++ b/test/e2e/aks_machinepools.go
@@ -28,8 +28,7 @@ import (
 	. "github.com/onsi/gomega"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/utils/ptr"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/test/framework"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -40,7 +39,7 @@ import (
 type AKSMachinePoolSpecInput struct {
 	MgmtCluster   framework.ClusterProxy
 	Cluster       *clusterv1.Cluster
-	MachinePools  []*expv1.MachinePool
+	MachinePools  []*clusterv1.MachinePool
 	WaitIntervals []interface{}
 }
 
@@ -50,7 +49,7 @@ func AKSMachinePoolSpec(ctx context.Context, inputGetter func() AKSMachinePoolSp
 	for _, mp := range input.MachinePools {
 		wg.Add(1)
-		go func(mp *expv1.MachinePool) {
+		go func(mp *clusterv1.MachinePool) {
 			defer GinkgoRecover()
 			defer wg.Done()
@@ -61,7 +60,7 @@ func AKSMachinePoolSpec(ctx context.Context, inputGetter func() AKSMachinePoolSp
 				ClusterProxy:              input.MgmtCluster,
 				Cluster:                   input.Cluster,
 				Replicas:                  ptr.Deref(mp.Spec.Replicas, 0) + 1,
-				MachinePools:              []*expv1.MachinePool{mp},
+				MachinePools:              []*clusterv1.MachinePool{mp},
 				WaitForMachinePoolToScale: input.WaitIntervals,
 			})
@@ -70,7 +69,7 @@ func AKSMachinePoolSpec(ctx context.Context, inputGetter func() AKSMachinePoolSp
 				ClusterProxy:              input.MgmtCluster,
 				Cluster:                   input.Cluster,
 				Replicas:                  ptr.Deref(mp.Spec.Replicas, 0) - 1,
-				MachinePools:              []*expv1.MachinePool{mp},
+				MachinePools:              []*clusterv1.MachinePool{mp},
 				WaitForMachinePoolToScale: input.WaitIntervals,
 			})
@@ -119,7 +118,7 @@ func AKSMachinePoolSpec(ctx context.Context, inputGetter func() AKSMachinePoolSp
 					ClusterProxy:              input.MgmtCluster,
 					Cluster:                   input.Cluster,
 					Replicas:                  0,
-					MachinePools:              []*expv1.MachinePool{mp},
+					MachinePools:              []*clusterv1.MachinePool{mp},
 					WaitForMachinePoolToScale: input.WaitIntervals,
 				})
 			}
@@ -129,7 +128,7 @@ func AKSMachinePoolSpec(ctx context.Context, inputGetter func() AKSMachinePoolSp
 				ClusterProxy:              input.MgmtCluster,
 				Cluster:                   input.Cluster,
 				Replicas:                  originalReplicas,
-				MachinePools:              []*expv1.MachinePool{mp},
+				MachinePools:              []*clusterv1.MachinePool{mp},
 				WaitForMachinePoolToScale: input.WaitIntervals,
 			})
 		}(mp)
diff --git a/test/e2e/aks_marketplace.go b/test/e2e/aks_marketplace.go
index 121fccaefd5..2a31b6304c7 100644
--- a/test/e2e/aks_marketplace.go
+++ b/test/e2e/aks_marketplace.go
@@ -31,8 +31,8 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/utils/ptr"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	"sigs.k8s.io/cluster-api/util/conditions"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
@@ -150,7 +150,7 @@ func AKSMarketplaceExtensionSpec(ctx context.Context, inputGetter func() AKSMark
 	Eventually(func(g Gomega) {
 		err = mgmtClient.Get(ctx, client.ObjectKey{Namespace: input.Cluster.Spec.ControlPlaneRef.Namespace, Name: input.Cluster.Spec.ControlPlaneRef.Name}, infraControlPlane)
 		g.Expect(err).NotTo(HaveOccurred())
-		g.Expect(conditions.IsTrue(infraControlPlane, infrav1.AKSExtensionsReadyCondition)).To(BeTrue())
+		g.Expect(v1beta1conditions.IsTrue(infraControlPlane, infrav1.AKSExtensionsReadyCondition)).To(BeTrue())
 	}, input.WaitIntervals...).Should(Succeed())
 
 	By("Ensuring the AKS Marketplace Extension is added to the AzureManagedControlPlane")
diff --git a/test/e2e/aks_node_labels.go b/test/e2e/aks_node_labels.go
index ec16be07d31..024223c7df8 100644
--- a/test/e2e/aks_node_labels.go
+++ b/test/e2e/aks_node_labels.go
@@ -29,8 +29,7 @@ import (
 	. "github.com/onsi/gomega"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/utils/ptr"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
@@ -38,7 +37,7 @@ type AKSNodeLabelsSpecInput struct {
 	Cluster       *clusterv1.Cluster
-	MachinePools  []*expv1.MachinePool
+	MachinePools  []*clusterv1.MachinePool
 	WaitForUpdate []interface{}
 }
 
@@ -65,7 +64,7 @@ func AKSNodeLabelsSpec(ctx context.Context, inputGetter func() AKSNodeLabelsSpec
 	for _, mp := range input.MachinePools {
 		wg.Add(1)
-		go func(mp *expv1.MachinePool) {
+		go func(mp *clusterv1.MachinePool) {
 			defer GinkgoRecover()
 			defer wg.Done()
diff --git a/test/e2e/aks_node_taints.go b/test/e2e/aks_node_taints.go
index 69ff71a5d7b..9b6e15ff7bd 100644
--- a/test/e2e/aks_node_taints.go
+++ b/test/e2e/aks_node_taints.go
@@ -31,8 +31,7 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/utils/ptr"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
@@ -40,7 +39,7 @@ type AKSNodeTaintsSpecInput struct {
 	Cluster       *clusterv1.Cluster
-	MachinePools  []*expv1.MachinePool
+	MachinePools  []*clusterv1.MachinePool
 	WaitForUpdate []interface{}
 }
 
@@ -67,7 +66,7 @@ func AKSNodeTaintsSpec(ctx context.Context, inputGetter func() AKSNodeTaintsSpec
 	for _, mp := range input.MachinePools {
 		wg.Add(1)
-		go func(mp *expv1.MachinePool) {
+		go func(mp *clusterv1.MachinePool) {
 			defer GinkgoRecover()
 			defer wg.Done()
diff --git a/test/e2e/aks_patches.go b/test/e2e/aks_patches.go
index e40001182e5..a64111eaaee 100644
--- a/test/e2e/aks_patches.go
+++ b/test/e2e/aks_patches.go
@@ -32,8 +32,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/utils/ptr"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
@@ -41,7 +40,7 @@ import (
 type AKSPatchSpecInput struct {
 	Cluster       *clusterv1.Cluster
-	MachinePools  []*expv1.MachinePool
+	MachinePools  []*clusterv1.MachinePool
 	WaitForUpdate []interface{}
 }
 
@@ -177,7 +176,7 @@ func AKSPatchSpec(ctx context.Context, inputGetter func() AKSPatchSpecInput) {
 	for _, mp := range input.MachinePools {
 		wg.Add(1)
-		go func(mp *expv1.MachinePool) {
+		go func(mp *clusterv1.MachinePool) {
 			defer GinkgoRecover()
 			defer wg.Done()
diff --git a/test/e2e/aks_public_ip_prefix.go b/test/e2e/aks_public_ip_prefix.go
index 1e50f18b9b3..950be04f2cc 100644
--- a/test/e2e/aks_public_ip_prefix.go
+++ b/test/e2e/aks_public_ip_prefix.go
@@ -31,9 +31,8 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/utils/ptr"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
-	"sigs.k8s.io/cluster-api/util/conditions"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
@@ -102,12 +101,12 @@ func AKSPublicIPPrefixSpec(ctx context.Context, inputGetter func() AKSPublicIPPr
 	err = mgmtClient.Create(ctx, infraMachinePool)
 	Expect(err).NotTo(HaveOccurred())
 
-	machinePool := &expv1.MachinePool{
+	machinePool := &clusterv1.MachinePool{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: infraMachinePool.Namespace,
 			Name:      infraMachinePool.Name,
 		},
-		Spec: expv1.MachinePoolSpec{
+		Spec: clusterv1.MachinePoolSpec{
 			ClusterName: input.Cluster.Name,
 			Replicas:    ptr.To[int32](2),
 			Template: clusterv1.MachineTemplateSpec{
@@ -135,7 +134,7 @@ func AKSPublicIPPrefixSpec(ctx context.Context, inputGetter func() AKSPublicIPPr
 	Expect(err).NotTo(HaveOccurred())
 
 	Eventually(func(g Gomega) {
-		err := mgmtClient.Get(ctx, client.ObjectKeyFromObject(machinePool), &expv1.MachinePool{})
+		err := mgmtClient.Get(ctx, client.ObjectKeyFromObject(machinePool), &clusterv1.MachinePool{})
 		g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
 	}, input.WaitIntervals...).Should(Succeed(), "Deleted MachinePool %s/%s still exists", machinePool.Namespace, machinePool.Name)
@@ -150,6 +149,6 @@ func AKSPublicIPPrefixSpec(ctx context.Context, inputGetter func() AKSPublicIPPr
 		infraMachinePool := &infrav1.AzureManagedMachinePool{}
 		err := mgmtClient.Get(ctx, client.ObjectKeyFromObject(machinePool), infraMachinePool)
 		g.Expect(err).NotTo(HaveOccurred())
-		g.Expect(conditions.IsTrue(infraMachinePool, infrav1.AgentPoolsReadyCondition)).To(BeTrue())
+		g.Expect(v1beta1conditions.IsTrue(infraMachinePool, infrav1.AgentPoolsReadyCondition)).To(BeTrue())
 	}, input.WaitIntervals...).Should(Succeed())
 }
diff --git a/test/e2e/aks_spot.go b/test/e2e/aks_spot.go
index eebf129d2fa..966f20daa60 100644
--- a/test/e2e/aks_spot.go
+++ b/test/e2e/aks_spot.go
@@ -30,9 +30,8 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/utils/ptr"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
-	"sigs.k8s.io/cluster-api/util/conditions"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
@@ -80,12 +79,12 @@ func AKSSpotSpec(ctx context.Context, inputGetter func() AKSSpotSpecInput) {
 	err = mgmtClient.Create(ctx, infraMachinePool)
 	Expect(err).NotTo(HaveOccurred())
 
-	machinePool := &expv1.MachinePool{
+	machinePool := &clusterv1.MachinePool{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: infraMachinePool.Namespace,
 			Name:      infraMachinePool.Name,
 		},
-		Spec: expv1.MachinePoolSpec{
+		Spec: clusterv1.MachinePoolSpec{
 			ClusterName: input.Cluster.Name,
 			Replicas:    ptr.To[int32](0),
 			Template: clusterv1.MachineTemplateSpec{
@@ -113,7 +112,7 @@ func AKSSpotSpec(ctx context.Context, inputGetter func() AKSSpotSpecInput) {
 	Expect(err).NotTo(HaveOccurred())
 
 	Eventually(func(g Gomega) {
-		err := mgmtClient.Get(ctx, client.ObjectKeyFromObject(machinePool), &expv1.MachinePool{})
+		err := mgmtClient.Get(ctx, client.ObjectKeyFromObject(machinePool), &clusterv1.MachinePool{})
 		g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
 	}, input.WaitIntervals...).Should(Succeed(), "Deleted MachinePool %s/%s still exists", machinePool.Namespace, machinePool.Name)
@@ -128,6 +127,6 @@ func AKSSpotSpec(ctx context.Context, inputGetter func() AKSSpotSpecInput) {
 		infraMachinePool := &infrav1.AzureManagedMachinePool{}
 		err := mgmtClient.Get(ctx, client.ObjectKeyFromObject(machinePool), infraMachinePool)
 		g.Expect(err).NotTo(HaveOccurred())
-		g.Expect(conditions.IsTrue(infraMachinePool, infrav1.AgentPoolsReadyCondition)).To(BeTrue())
+		g.Expect(v1beta1conditions.IsTrue(infraMachinePool, infrav1.AgentPoolsReadyCondition)).To(BeTrue())
 	}, input.WaitIntervals...).Should(Succeed())
 }
diff --git a/test/e2e/aks_tags.go b/test/e2e/aks_tags.go
index deac4ddb610..b9557658927 100644
--- a/test/e2e/aks_tags.go
+++ b/test/e2e/aks_tags.go
@@ -30,8 +30,7 @@ import (
 	"golang.org/x/exp/maps"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/utils/ptr"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
@@ -40,7 +39,7 @@ import (
 
 type AKSAdditionalTagsSpecInput struct {
 	Cluster       *clusterv1.Cluster
-	MachinePools  []*expv1.MachinePool
+	MachinePools  []*clusterv1.MachinePool
 	WaitForUpdate []interface{}
 }
 
@@ -143,7 +142,7 @@ func AKSAdditionalTagsSpec(ctx context.Context, inputGetter func() AKSAdditional
 	for _, mp := range input.MachinePools {
 		wg.Add(1)
-		go func(mp *expv1.MachinePool) {
+		go func(mp *clusterv1.MachinePool) {
 			defer GinkgoRecover()
 			defer wg.Done()
diff --git a/test/e2e/aks_upgrade.go b/test/e2e/aks_upgrade.go
index a907f7ee890..e671a9f482f 100644
--- a/test/e2e/aks_upgrade.go
+++ b/test/e2e/aks_upgrade.go
@@ -27,8 +27,7 @@ import (
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	"k8s.io/utils/ptr"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/test/framework"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -37,7 +36,7 @@ import (
 
 type AKSUpgradeSpecInput struct {
 	Cluster                    *clusterv1.Cluster
-	MachinePools               []*expv1.MachinePool
+	MachinePools               []*clusterv1.MachinePool
 	KubernetesVersionUpgradeTo string
 	WaitForControlPlane        []interface{}
 	WaitForMachinePools        []interface{}
diff --git a/test/e2e/azure_apiserver_ilb.go b/test/e2e/azure_apiserver_ilb.go
index fc5d2dca488..1c6da4cf1d0 100644
--- a/test/e2e/azure_apiserver_ilb.go
+++ b/test/e2e/azure_apiserver_ilb.go
@@ -108,7 +108,7 @@ import (
 	"k8s.io/client-go/tools/remotecommand"
 	"k8s.io/kubectl/pkg/scheme"
 	"k8s.io/utils/ptr"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/test/framework"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
diff --git a/test/e2e/azure_clusterproxy.go b/test/e2e/azure_clusterproxy.go
index 26dfba9f6f3..c6b26a341cc 100644
--- a/test/e2e/azure_clusterproxy.go
+++ b/test/e2e/azure_clusterproxy.go
@@ -45,7 +45,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/kubectl/pkg/describe"
 	"k8s.io/utils/ptr"
-	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	"sigs.k8s.io/cluster-api/test/framework"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -74,7 +74,7 @@ func initScheme() *runtime.Scheme {
 	framework.TryAddDefaultSchemes(scheme)
 	Expect(infrav1.AddToScheme(scheme)).To(Succeed())
 	Expect(infrav1exp.AddToScheme(scheme)).To(Succeed())
-	Expect(expv1.AddToScheme(scheme)).To(Succeed())
+	Expect(clusterv1beta1.AddToScheme(scheme)).To(Succeed())
 	Expect(asoresourcesv1.AddToScheme(scheme)).To(Succeed())
 	Expect(asocontainerservicev1.AddToScheme(scheme)).To(Succeed())
 	Expect(asocontainerservicev1preview.AddToScheme(scheme)).To(Succeed())
diff --git a/test/e2e/azure_edgezone.go b/test/e2e/azure_edgezone.go
index 9c9d48c8b1b..714382da517 100644
--- a/test/e2e/azure_edgezone.go
+++ b/test/e2e/azure_edgezone.go
@@ -27,7 +27,7 @@ import (
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/test/framework"
 	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
 	"sigs.k8s.io/controller-runtime/pkg/client"
diff --git a/test/e2e/azure_failuredomains.go b/test/e2e/azure_failuredomains.go
index ff59dd78fb9..0272ee82bd9 100644
--- a/test/e2e/azure_failuredomains.go
+++ b/test/e2e/azure_failuredomains.go
@@ -27,7 +27,7 @@ import (
 	. 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" apimachinerytypes "k8s.io/apimachinery/pkg/types" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/test/framework" ) diff --git a/test/e2e/azure_logcollector.go b/test/e2e/azure_logcollector.go index 1e34dbea45e..69c7f0f6260 100644 --- a/test/e2e/azure_logcollector.go +++ b/test/e2e/azure_logcollector.go @@ -33,10 +33,7 @@ import ( "github.com/pkg/errors" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/test/framework" - "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/controller-runtime/pkg/client" kinderrors "sigs.k8s.io/kind/pkg/errors" @@ -57,7 +54,7 @@ const ( var _ framework.ClusterLogCollector = &AzureLogCollector{} // CollectMachineLog collects logs from a machine. -func (k AzureLogCollector) CollectMachineLog(ctx context.Context, managementClusterClient client.Client, m *clusterv1.Machine, outputPath string) error { +func (k AzureLogCollector) CollectMachineLog(ctx context.Context, managementClusterClient client.Client, m *clusterv1beta1.Machine, outputPath string) error { infraGV, err := schema.ParseGroupVersion(m.Spec.InfrastructureRef.APIVersion) if err != nil { return fmt.Errorf("invalid spec.infrastructureRef.apiVersion %q: %w", m.Spec.InfrastructureRef.APIVersion, err) @@ -81,7 +78,7 @@ func (k AzureLogCollector) CollectMachineLog(ctx context.Context, managementClus } // CollectMachinePoolLog collects logs from a machine pool. -func (k AzureLogCollector) CollectMachinePoolLog(ctx context.Context, managementClusterClient client.Client, mp *expv1.MachinePool, outputPath string) error { +func (k AzureLogCollector) CollectMachinePoolLog(ctx context.Context, managementClusterClient client.Client, mp *clusterv1beta1.MachinePool, outputPath string) error { infraGV, err := schema.ParseGroupVersion(mp.Spec.Template.Spec.InfrastructureRef.APIVersion) if err != nil { return fmt.Errorf("invalid spec.infrastructureRef.apiVersion %q: %w", mp.Spec.Template.Spec.InfrastructureRef.APIVersion, err) @@ -110,17 +107,17 @@ func (k AzureLogCollector) CollectMachinePoolLog(ctx context.Context, management // CollectInfrastructureLogs collects log from the infrastructure. // This is currently a no-op implementation to satisfy the LogCollector interface. -func (k AzureLogCollector) CollectInfrastructureLogs(_ context.Context, _ client.Client, _ *clusterv1.Cluster, _ string) error { +func (k AzureLogCollector) CollectInfrastructureLogs(_ context.Context, _ client.Client, _ *clusterv1beta1.Cluster, _ string) error { return nil } -func collectAzureMachineLog(ctx context.Context, managementClusterClient client.Client, m *clusterv1.Machine, outputPath string) error { +func collectAzureMachineLog(ctx context.Context, managementClusterClient client.Client, m *clusterv1beta1.Machine, outputPath string) error { am, err := getAzureMachine(ctx, managementClusterClient, m) if err != nil { return fmt.Errorf("get AzureMachine %s/%s: %w", m.Spec.InfrastructureRef.Namespace, m.Spec.InfrastructureRef.Name, err) } - cluster, err := util.GetClusterFromMetadata(ctx, managementClusterClient, m.ObjectMeta) + cluster, err := clusterv1beta1util.GetClusterFromMetadata(ctx, managementClusterClient, m.ObjectMeta) if err != nil { return err } @@ -136,13 +133,13 @@ func collectAzureMachineLog(ctx context.Context, managementClusterClient client. 
return collectVMLog(ctx, cluster, subscriptionID, resourceGroup, name, outputPath) } -func collectAzureMachinePoolLog(ctx context.Context, managementClusterClient client.Client, mp *expv1.MachinePool, outputPath string) error { +func collectAzureMachinePoolLog(ctx context.Context, managementClusterClient client.Client, mp *clusterv1beta1.MachinePool, outputPath string) error { am, err := getAzureMachinePool(ctx, managementClusterClient, mp) if err != nil { return fmt.Errorf("get AzureMachinePool %s/%s: %w", mp.Spec.Template.Spec.InfrastructureRef.Namespace, mp.Spec.Template.Spec.InfrastructureRef.Name, err) } - cluster, err := util.GetClusterFromMetadata(ctx, managementClusterClient, mp.ObjectMeta) + cluster, err := clusterv1beta1util.GetClusterFromMetadata(ctx, managementClusterClient, mp.ObjectMeta) if err != nil { return err } @@ -158,7 +155,7 @@ func collectAzureMachinePoolLog(ctx context.Context, managementClusterClient cli return collectVMSSLog(ctx, cluster, subscriptionID, resourceGroup, name, outputPath) } -func collectVMLog(ctx context.Context, cluster *clusterv1.Cluster, subscriptionID, resourceGroup, name, outputPath string) error { +func collectVMLog(ctx context.Context, cluster *clusterv1beta1.Cluster, subscriptionID, resourceGroup, name, outputPath string) error { cred, err := azidentity.NewDefaultAzureCredential(nil) if err != nil { return errors.Wrap(err, "failed to get default azure credential") @@ -198,7 +195,7 @@ func collectVMLog(ctx context.Context, cluster *clusterv1.Cluster, subscriptionI return kinderrors.NewAggregate(errs) } -func collectVMSSLog(ctx context.Context, cluster *clusterv1.Cluster, subscriptionID, resourceGroup, name, outputPath string) error { +func collectVMSSLog(ctx context.Context, cluster *clusterv1beta1.Cluster, subscriptionID, resourceGroup, name, outputPath string) error { vmssID := azure.VMSSID(subscriptionID, resourceGroup, name) cred, err := azidentity.NewDefaultAzureCredential(nil) @@ -323,7 +320,7 @@ func collectVMSSLog(ctx context.Context, cluster *clusterv1.Cluster, subscriptio } // collectLogsFromNode collects logs from various sources by ssh'ing into the node -func collectLogsFromNode(cluster *clusterv1.Cluster, hostname string, isWindows bool, outputPath string) error { +func collectLogsFromNode(cluster *clusterv1beta1.Cluster, hostname string, isWindows bool, outputPath string) error { nodeOSType := azure.LinuxOS if isWindows { nodeOSType = azure.WindowsOS @@ -393,7 +390,7 @@ func getAzureASOManagedCluster(ctx context.Context, managementClusterClient clie return azManagedCluster, err } -func getAzureMachine(ctx context.Context, managementClusterClient client.Client, m *clusterv1.Machine) (*infrav1.AzureMachine, error) { +func getAzureMachine(ctx context.Context, managementClusterClient client.Client, m *clusterv1beta1.Machine) (*infrav1.AzureMachine, error) { key := client.ObjectKey{ Namespace: m.Spec.InfrastructureRef.Namespace, Name: m.Spec.InfrastructureRef.Name, @@ -404,7 +401,7 @@ func getAzureMachine(ctx context.Context, managementClusterClient client.Client, return azMachine, err } -func getAzureMachinePool(ctx context.Context, managementClusterClient client.Client, mp *expv1.MachinePool) (*infrav1exp.AzureMachinePool, error) { +func getAzureMachinePool(ctx context.Context, managementClusterClient client.Client, mp *clusterv1beta1.MachinePool) (*infrav1exp.AzureMachinePool, error) { key := client.ObjectKey{ Namespace: mp.Spec.Template.Spec.InfrastructureRef.Namespace, Name: mp.Spec.Template.Spec.InfrastructureRef.Name, diff 
--git a/test/e2e/azure_machinepools.go b/test/e2e/azure_machinepools.go index e60c2ca65c8..d0d60380edf 100644 --- a/test/e2e/azure_machinepools.go +++ b/test/e2e/azure_machinepools.go @@ -31,8 +31,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/selection" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/controller-runtime/pkg/client" @@ -81,7 +80,7 @@ func AzureMachinePoolsSpec(ctx context.Context, inputGetter func() AzureMachineP ampList := &infrav1exp.AzureMachinePoolList{} Expect(mgmtClient.List(ctx, ampList, client.InNamespace(input.Namespace.Name), client.MatchingLabels(clusterLabels))).To(Succeed()) Expect(ampList.Items).NotTo(BeEmpty()) - machinepools := []*expv1.MachinePool{} + machinepools := []*clusterv1.MachinePool{} for _, amp := range ampList.Items { Byf("checking AzureMachinePool %s in %s orchestration mode", amp.Name, amp.Spec.OrchestrationMode) Expect(amp.Status.Replicas).To(BeNumerically("==", len(amp.Spec.ProviderIDList))) @@ -104,14 +103,14 @@ func AzureMachinePoolsSpec(ctx context.Context, inputGetter func() AzureMachineP goalReplicas := ptr.Deref[int32](mp.Spec.Replicas, 0) + 1 Byf("Scaling machine pool %s out from %d to %d", mp.Name, *mp.Spec.Replicas, goalReplicas) wg.Add(1) - go func(mp *expv1.MachinePool) { + go func(mp *clusterv1.MachinePool) { defer GinkgoRecover() defer wg.Done() framework.ScaleMachinePoolAndWait(ctx, framework.ScaleMachinePoolAndWaitInput{ ClusterProxy: bootstrapClusterProxy, Cluster: input.Cluster, Replicas: goalReplicas, - MachinePools: []*expv1.MachinePool{mp}, + MachinePools: []*clusterv1.MachinePool{mp}, WaitForMachinePoolToScale: input.WaitIntervals, }) }(mp) @@ -122,14 +121,14 @@ func AzureMachinePoolsSpec(ctx context.Context, inputGetter func() AzureMachineP goalReplicas := ptr.Deref[int32](mp.Spec.Replicas, 0) - 1 Byf("Scaling machine pool %s in from %d to %d", mp.Name, *mp.Spec.Replicas, goalReplicas) wg.Add(1) - go func(mp *expv1.MachinePool) { + go func(mp *clusterv1.MachinePool) { defer GinkgoRecover() defer wg.Done() framework.ScaleMachinePoolAndWait(ctx, framework.ScaleMachinePoolAndWaitInput{ ClusterProxy: bootstrapClusterProxy, Cluster: input.Cluster, Replicas: goalReplicas, - MachinePools: []*expv1.MachinePool{mp}, + MachinePools: []*clusterv1.MachinePool{mp}, WaitForMachinePoolToScale: input.WaitIntervals, }) }(mp) diff --git a/test/e2e/azure_privatecluster.go b/test/e2e/azure_privatecluster.go index 9c341a051d1..e169aa1ea8c 100644 --- a/test/e2e/azure_privatecluster.go +++ b/test/e2e/azure_privatecluster.go @@ -34,7 +34,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" "sigs.k8s.io/cluster-api/util" diff --git a/test/e2e/azure_securitygroups.go b/test/e2e/azure_securitygroups.go index 257f747e32b..94d0d69d837 100644 --- a/test/e2e/azure_securitygroups.go +++ b/test/e2e/azure_securitygroups.go @@ -28,7 +28,7 @@ import ( . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/controller-runtime/pkg/client" diff --git a/test/e2e/azure_selfhosted.go b/test/e2e/azure_selfhosted.go index 0aab94b486b..b00d9fa3777 100644 --- a/test/e2e/azure_selfhosted.go +++ b/test/e2e/azure_selfhosted.go @@ -31,7 +31,7 @@ import ( . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" capi_e2e "sigs.k8s.io/cluster-api/test/e2e" "sigs.k8s.io/cluster-api/test/framework" diff --git a/test/e2e/azure_vmextensions.go b/test/e2e/azure_vmextensions.go index 8a8b94a58bf..c55e83ee00a 100644 --- a/test/e2e/azure_vmextensions.go +++ b/test/e2e/azure_vmextensions.go @@ -27,7 +27,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/controller-runtime/pkg/client" diff --git a/test/e2e/capi_test.go b/test/e2e/capi_test.go index 22379399124..15a12aa0b81 100644 --- a/test/e2e/capi_test.go +++ b/test/e2e/capi_test.go @@ -30,7 +30,6 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" capi_e2e "sigs.k8s.io/cluster-api/test/e2e" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" @@ -279,7 +278,7 @@ var _ = Describe("Running the Cluster API E2E tests", func() { InitWithInfrastructureProviders: []string{"azure:" + e2eConfig.MustGetVariable(OldProviderUpgradeVersion)}, Upgrades: []capi_e2e.ClusterctlUpgradeSpecInputUpgrade{ { - Contract: clusterv1.GroupVersion.Version, + Contract: clusterv1beta1.GroupVersion.Version, PostUpgrade: func(managementClusterProxy framework.ClusterProxy, clusterNamespace, clusterName string) { AKSMachinePoolPostUpgradeSpec(ctx, func() AKSMachinePoolPostUpgradeSpecInput { return AKSMachinePoolPostUpgradeSpecInput{ @@ -317,7 +316,7 @@ var _ = Describe("Running the Cluster API E2E tests", func() { InitWithInfrastructureProviders: []string{"azure:" + e2eConfig.MustGetVariable(LatestProviderUpgradeVersion)}, Upgrades: []capi_e2e.ClusterctlUpgradeSpecInputUpgrade{ { - Contract: clusterv1.GroupVersion.Version, + Contract: clusterv1beta1.GroupVersion.Version, PostUpgrade: func(managementClusterProxy framework.ClusterProxy, clusterNamespace, clusterName string) { AKSMachinePoolPostUpgradeSpec(ctx, func() AKSMachinePoolPostUpgradeSpecInput { return AKSMachinePoolPostUpgradeSpecInput{ diff --git a/test/e2e/cloud-provider-azure.go b/test/e2e/cloud-provider-azure.go index 59dd4383a5a..896196db87e 100644 --- a/test/e2e/cloud-provider-azure.go +++ b/test/e2e/cloud-provider-azure.go @@ -25,7 +25,7 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "k8s.io/apimachinery/pkg/types" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/test/framework/clusterctl" ) diff --git a/test/e2e/common.go b/test/e2e/common.go index 6e32532998b..0e6c1fa4549 100644 --- a/test/e2e/common.go +++ b/test/e2e/common.go @@ -38,12 +38,11 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - kubeadmv1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + kubeadmv1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta1" capi_e2e "sigs.k8s.io/cluster-api/test/e2e" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/kubeconfig" "sigs.k8s.io/controller-runtime/pkg/client" @@ -147,7 +146,7 @@ type cleanupInput struct { ArtifactFolder string Namespace *corev1.Namespace CancelWatches context.CancelFunc - Cluster *clusterv1.Cluster + Cluster *clusterv1beta1.Cluster IntervalsGetter func(spec, key string) []interface{} SkipCleanup bool SkipLogCollection bool @@ -304,12 +303,12 @@ func ensureContolPlaneReplicasMatch(ctx context.Context, proxy framework.Cluster inClustersNamespaceListOption := client.InNamespace(ns) // ControlPlane labels matchClusterListOption := client.MatchingLabels{ - clusterv1.MachineControlPlaneLabel: "", - clusterv1.ClusterNameLabel: clusterName, + clusterv1beta1.MachineControlPlaneLabel: "", + clusterv1beta1.ClusterNameLabel: clusterName, } Eventually(func() (int, error) { - machineList := &clusterv1.MachineList{} + machineList := &clusterv1beta1.MachineList{} lister := proxy.GetClient() if err := lister.List(ctx, machineList, inClustersNamespaceListOption, matchClusterListOption); err != nil { Logf("Failed to list the machines: %+v", err) @@ -317,7 +316,7 @@ func ensureContolPlaneReplicasMatch(ctx context.Context, proxy framework.Cluster } count := 0 for _, machine := range machineList.Items { - if condition := conditions.Get(&machine, clusterv1.MachineReadyV1Beta2Condition); condition != nil && condition.Status == corev1.ConditionTrue { + if condition := v1beta1conditions.Get(&machine, clusterv1beta1.MachineReadyV1Beta2Condition); condition != nil && condition.Status == corev1.ConditionTrue { count++ } } diff --git a/test/e2e/config/azure-dev.yaml b/test/e2e/config/azure-dev.yaml index b89ed3fb755..5e0dc89b7ce 100644 --- a/test/e2e/config/azure-dev.yaml +++ b/test/e2e/config/azure-dev.yaml @@ -3,11 +3,11 @@ managementClusterName: capz-e2e images: - name: ${MANAGER_IMAGE} loadBehavior: mustLoad - - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.10.7 + - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.11.2 loadBehavior: tryLoad - - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.10.7 + - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.11.2 loadBehavior: tryLoad - - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.10.7 + - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.11.2 loadBehavior: tryLoad - name: registry.k8s.io/cluster-api-helm/cluster-api-helm-controller:v0.4.1 loadBehavior: tryLoad @@ -16,8 +16,8 @@ providers: - name: cluster-api type: CoreProvider versions: - - name: v1.9.11 # latest patch of earliest minor in supported v1beta1 releases; this 
is used for v1beta1 old --> v1beta1 latest clusterctl upgrades test only. - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.9.11/core-components.yaml" + - name: v1.10.7 # latest patch of earliest minor in supported v1beta1 releases; this is used for v1beta1 old --> v1beta1 latest clusterctl upgrades test only. + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.10.7/core-components.yaml" type: "url" contract: v1beta1 replacements: @@ -25,8 +25,8 @@ providers: new: --metrics-addr=:8080 files: - sourcePath: "../data/shared/v1beta1/metadata.yaml" - - name: v1.10.7 - value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.10.7/core-components.yaml + - name: v1.11.2 + value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.11.2/core-components.yaml type: url contract: v1beta1 files: @@ -39,8 +39,8 @@ providers: - name: kubeadm type: BootstrapProvider versions: - - name: v1.9.11 # latest patch of earliest minor in supported v1beta1 releases; this is used for v1beta1 old --> v1beta1 latest clusterctl upgrades test only. - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.9.11/bootstrap-components.yaml" + - name: v1.10.7 # latest patch of earliest minor in supported v1beta1 releases; this is used for v1beta1 old --> v1beta1 latest clusterctl upgrades test only. + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.10.7/bootstrap-components.yaml" type: "url" contract: v1beta1 replacements: @@ -48,8 +48,8 @@ providers: new: --metrics-addr=:8080 files: - sourcePath: "../data/shared/v1beta1/metadata.yaml" - - name: v1.10.7 - value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.10.7/bootstrap-components.yaml + - name: v1.11.2 + value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.11.2/bootstrap-components.yaml type: url contract: v1beta1 files: @@ -61,8 +61,8 @@ providers: - name: kubeadm type: ControlPlaneProvider versions: - - name: v1.9.11 # latest patch of earliest minor in supported v1beta1 releases; this is used for v1beta1 old --> v1beta1 latest clusterctl upgrades test only. - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.9.11/control-plane-components.yaml" + - name: v1.10.7 # latest patch of earliest minor in supported v1beta1 releases; this is used for v1beta1 old --> v1beta1 latest clusterctl upgrades test only. 
+ value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.10.7/control-plane-components.yaml" type: "url" contract: v1beta1 replacements: @@ -70,8 +70,8 @@ providers: new: --metrics-addr=:8080 files: - sourcePath: "../data/shared/v1beta1/metadata.yaml" - - name: v1.10.7 - value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.10.7/control-plane-components.yaml + - name: v1.11.2 + value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.11.2/control-plane-components.yaml type: url contract: v1beta1 files: @@ -252,8 +252,8 @@ variables: KUBETEST_CONFIGURATION: "./data/kubetest/conformance.yaml" WINDOWS_CONTAINERD_URL: "${WINDOWS_CONTAINERD_URL:-}" AZURE_CNI_V1_MANIFEST_PATH: "${PWD}/templates/addons/azure-cni-v1.yaml" - OLD_CAPI_UPGRADE_VERSION: "v1.9.11" - LATEST_CAPI_UPGRADE_VERSION: "v1.10.7" + OLD_CAPI_UPGRADE_VERSION: "v1.10.7" + LATEST_CAPI_UPGRADE_VERSION: "v1.11.2" OLD_PROVIDER_UPGRADE_VERSION: "v1.20.4" LATEST_PROVIDER_UPGRADE_VERSION: "v1.21.1" OLD_CAAPH_UPGRADE_VERSION: "v0.1.0-alpha.10" diff --git a/test/e2e/daemonsets.go b/test/e2e/daemonsets.go index a4ae7466eeb..1a2316a340a 100644 --- a/test/e2e/daemonsets.go +++ b/test/e2e/daemonsets.go @@ -25,7 +25,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" - kubeadmv1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + kubeadmv1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta1" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/controller-runtime/pkg/client" ) diff --git a/test/e2e/data/shared/v1beta1/metadata.yaml b/test/e2e/data/shared/v1beta1/metadata.yaml index c95dcf9bd96..b171bcf25b8 100644 --- a/test/e2e/data/shared/v1beta1/metadata.yaml +++ b/test/e2e/data/shared/v1beta1/metadata.yaml @@ -1,6 +1,9 @@ apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3 kind: Metadata releaseSeries: + - major: 1 + minor: 11 + contract: v1beta1 - major: 1 minor: 10 contract: v1beta1 diff --git a/test/e2e/kubernetes/deployment/deployment.go b/test/e2e/kubernetes/deployment/deployment.go index e3806fc67e1..4adbc20d84d 100644 --- a/test/e2e/kubernetes/deployment/deployment.go +++ b/test/e2e/kubernetes/deployment/deployment.go @@ -34,7 +34,6 @@ import ( "k8s.io/client-go/kubernetes" typedappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) const ( @@ -230,8 +229,8 @@ func (d *Builder) AddMachinePoolSelectors(machinePoolName string) *Builder { d.deployment.Spec.Template.Spec.NodeSelector = map[string]string{} } - d.deployment.Spec.Template.Spec.NodeSelector[clusterv1.OwnerKindAnnotation] = "MachinePool" - d.deployment.Spec.Template.Spec.NodeSelector[clusterv1.OwnerNameAnnotation] = machinePoolName + d.deployment.Spec.Template.Spec.NodeSelector[clusterv1beta1.OwnerKindAnnotation] = "MachinePool" + d.deployment.Spec.Template.Spec.NodeSelector[clusterv1beta1.OwnerNameAnnotation] = machinePoolName return d } diff --git a/util/azure/azure.go b/util/azure/azure.go index 403565a621b..d7754d22b07 100644 --- a/util/azure/azure.go +++ b/util/azure/azure.go @@ -23,7 +23,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" "github.com/pkg/errors" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -42,9 +42,9 @@ func IsAzureSystemNodeLabelKey(labelKey string) bool { } // FindParentMachinePool finds the parent MachinePool for the AzureMachinePool. 
-func FindParentMachinePool(ampName string, cli client.Client) (*expv1.MachinePool, error) { +func FindParentMachinePool(ampName string, cli client.Client) (*clusterv1beta1.MachinePool, error) { ctx := context.Background() - machinePoolList := &expv1.MachinePoolList{} + machinePoolList := &clusterv1beta1.MachinePoolList{} if err := cli.List(ctx, machinePoolList); err != nil { return nil, errors.Wrapf(err, "failed to list MachinePools for %s", ampName) } @@ -57,7 +57,7 @@ func FindParentMachinePool(ampName string, cli client.Client) (*expv1.MachinePoo } // FindParentMachinePoolWithRetry finds the parent MachinePool for the AzureMachinePool with retry. -func FindParentMachinePoolWithRetry(ampName string, cli client.Client, maxAttempts int) (*expv1.MachinePool, error) { +func FindParentMachinePoolWithRetry(ampName string, cli client.Client, maxAttempts int) (*clusterv1beta1.MachinePool, error) { for i := 1; ; i++ { p, err := FindParentMachinePool(ampName, cli) if err != nil { diff --git a/util/azure/azure_test.go b/util/azure/azure_test.go index 75733a2a28b..41773601ff7 100644 --- a/util/azure/azure_test.go +++ b/util/azure/azure_test.go @@ -21,7 +21,7 @@ import ( "testing" . "github.com/onsi/gomega" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -113,9 +113,9 @@ type mockClient struct { } func (m mockClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - mp := &expv1.MachinePool{} + mp := &clusterv1beta1.MachinePool{} mp.Spec.Template.Spec.InfrastructureRef.Name = "mock-machinepool-mp-0" - list.(*expv1.MachinePoolList).Items = []expv1.MachinePool{*mp} + list.(*clusterv1beta1.MachinePoolList).Items = []clusterv1beta1.MachinePool{*mp} return nil } diff --git a/util/v1beta1/clusters.go b/util/v1beta1/clusters.go new file mode 100644 index 00000000000..808669feb75 --- /dev/null +++ b/util/v1beta1/clusters.go @@ -0,0 +1,74 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + // ErrNoCluster is returned when the cluster + // label could not be found on the object passed in. + ErrNoCluster = fmt.Errorf("no %q label present", clusterv1beta1.ClusterNameLabel) +) + +// GetClusterFromMetadata returns the Cluster object (if present) using the object metadata. 
+func GetClusterFromMetadata(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1beta1.Cluster, error) { + if obj.Labels[clusterv1beta1.ClusterNameLabel] == "" { + return nil, errors.WithStack(ErrNoCluster) + } + return GetClusterByName(ctx, c, obj.Namespace, obj.Labels[clusterv1beta1.ClusterNameLabel]) +} + +// GetOwnerCluster returns the Cluster object owning the current resource. +func GetOwnerCluster(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1beta1.Cluster, error) { + for _, ref := range obj.GetOwnerReferences() { + if ref.Kind != "Cluster" { + continue + } + gv, err := schema.ParseGroupVersion(ref.APIVersion) + if err != nil { + return nil, errors.WithStack(err) + } + if gv.Group == clusterv1beta1.GroupVersion.Group { + return GetClusterByName(ctx, c, obj.Namespace, ref.Name) + } + } + return nil, nil +} + +// GetClusterByName finds and return a Cluster object using the specified params. +func GetClusterByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1beta1.Cluster, error) { + cluster := &clusterv1beta1.Cluster{} + key := client.ObjectKey{ + Namespace: namespace, + Name: name, + } + + if err := c.Get(ctx, key, cluster); err != nil { + return nil, errors.Wrapf(err, "failed to get Cluster/%s", name) + } + + return cluster, nil +} diff --git a/util/v1beta1/helpers.go b/util/v1beta1/helpers.go new file mode 100644 index 00000000000..88cc08ac2b9 --- /dev/null +++ b/util/v1beta1/helpers.go @@ -0,0 +1,45 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" +) + +// IsPaused returns true if the Cluster is paused or the object has the `paused` annotation. +func IsPaused(cluster *clusterv1beta1.Cluster, o metav1.Object) bool { + if cluster.Spec.Paused { + return true + } + return HasPaused(o) +} + +// HasPaused returns true if the object has the `paused` annotation. +func HasPaused(o metav1.Object) bool { + return hasAnnotation(o, clusterv1beta1.PausedAnnotation) +} + +// hasAnnotation returns true if the object has the specified annotation. +func hasAnnotation(o metav1.Object, annotation string) bool { + annotations := o.GetAnnotations() + if annotations == nil { + return false + } + _, ok := annotations[annotation] + return ok +} diff --git a/util/v1beta1/machinepools.go b/util/v1beta1/machinepools.go new file mode 100644 index 00000000000..16317b0b742 --- /dev/null +++ b/util/v1beta1/machinepools.go @@ -0,0 +1,54 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "context" + + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// GetMachinePoolByName finds and returns a MachinePool object using the specified params. +func GetMachinePoolByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1beta1.MachinePool, error) { + m := &clusterv1beta1.MachinePool{} + key := client.ObjectKey{Name: name, Namespace: namespace} + if err := c.Get(ctx, key, m); err != nil { + return nil, err + } + return m, nil +} + +// GetOwnerMachinePool returns the MachinePool objects owning the current resource. +func GetOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1beta1.MachinePool, error) { + for _, ref := range obj.GetOwnerReferences() { + if ref.Kind != "MachinePool" { + continue + } + gv, err := schema.ParseGroupVersion(ref.APIVersion) + if err != nil { + return nil, errors.WithStack(err) + } + if gv.Group == clusterv1beta1.GroupVersion.Group { + return GetMachinePoolByName(ctx, c, obj.Namespace, ref.Name) + } + } + return nil, nil +} diff --git a/util/v1beta1/machines.go b/util/v1beta1/machines.go new file mode 100644 index 00000000000..acc14c20f52 --- /dev/null +++ b/util/v1beta1/machines.go @@ -0,0 +1,57 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "context" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// IsControlPlaneMachine returns true if the provided resource is +// a member of the control plane. +func IsControlPlaneMachine(machine metav1.Object) bool { + _, ok := machine.GetLabels()[clusterv1beta1.MachineControlPlaneLabel] + return ok +} + +// GetOwnerMachine returns the Machine object owning the current resource. +func GetOwnerMachine(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1beta1.Machine, error) { + for _, ref := range obj.GetOwnerReferences() { + gv, err := schema.ParseGroupVersion(ref.APIVersion) + if err != nil { + return nil, err + } + if ref.Kind == "Machine" && gv.Group == clusterv1beta1.GroupVersion.Group { + return GetMachineByName(ctx, c, obj.Namespace, ref.Name) + } + } + return nil, nil +} + +// GetMachineByName finds and return a Machine object using the specified params. 
+func GetMachineByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1beta1.Machine, error) { + m := &clusterv1beta1.Machine{} + key := client.ObjectKey{Name: name, Namespace: namespace} + if err := c.Get(ctx, key, m); err != nil { + return nil, err + } + return m, nil +}