diff --git a/api/v1alpha1/harvestermachine_types.go b/api/v1alpha1/harvestermachine_types.go
index 794921a..dc8d7bc 100644
--- a/api/v1alpha1/harvestermachine_types.go
+++ b/api/v1alpha1/harvestermachine_types.go
@@ -110,7 +110,7 @@ type VolumeType string
 // HarvesterMachineStatus defines the observed state of HarvesterMachine.
 type HarvesterMachineStatus struct {
 	// Ready is true when the provider resource is ready.
-	Ready bool `json:"ready"`
+	Ready bool `json:"ready,omitempty"`
 
 	Conditions []capiv1beta1.Condition `json:"conditions,omitempty"`
 
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_harvesterclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_harvesterclusters.yaml
index 3762829..8d8d231 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_harvesterclusters.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_harvesterclusters.yaml
@@ -32,7 +32,7 @@
     name: v1alpha1
     schema:
       openAPIV3Schema:
-        description: HarvesterCluster is the Schema for the harvesterclusters API
+        description: HarvesterCluster is the Schema for the harvesterclusters API.
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation
@@ -47,7 +47,7 @@
          metadata:
            type: object
          spec:
-            description: HarvesterClusterSpec defines the desired state of HarvesterCluster
+            description: HarvesterClusterSpec defines the desired state of HarvesterCluster.
            properties:
              controlPlaneEndpoint:
                description: ControlPlaneEndpoint represents the endpoint used to
@@ -69,11 +69,11 @@
                  file.
                properties:
                  name:
-                    description: Name is the name of the required Identity Secret
+                    description: Name is the name of the required Identity Secret.
                    type: string
                  namespace:
                    description: Namespace is the namespace in which the required
-                      Identity Secret should be found
+                      Identity Secret should be found.
                    type: string
                required:
                - name
@@ -89,21 +89,23 @@
                    type: string
                  ipPool:
                    description: IpPool defines a new IpPool that will be added to
-                      Harvester. This field is mutually exclusive with "IpPoolRef"
+                      Harvester. This field is mutually exclusive with "IpPoolRef".
                    properties:
                      gateway:
                        description: Gateway is the IP Address that should be used
                          by the Gateway on the Subnet. It should be a valid address
-                          inside the subnet e.g. 172.17.1.1
+                          inside the subnet. e.g. 172.17.1.1.
                        type: string
                      subnet:
                        description: Subnet is a string describing the subnet that
                          should be used by the IP Pool, it should have the CIDR Format
-                          of an IPv4 Address e.g. 172.17.1.0/24
+                          of an IPv4 Address. e.g. 172.17.1.0/24.
                        type: string
                      vmNetwork:
                        description: VMNetwork is the name of an existing VM Network
-                          in Harvester where the IPPool should exist.
+                          in Harvester where the IPPool should exist. The reference
+                          can have the format "namespace/name" or just "name" if the
+                          object is in the same namespace as the HarvesterCluster.
                        type: string
                    required:
                    - gateway
@@ -111,14 +113,14 @@
                    - vmNetwork
                    type: object
                  ipPoolRef:
-                    description: IpPoolRef is a reference to an existing IpPool object
-                      in Harvester's cluster in the same namespace. This field is
-                      mutually exclusive with "ipPool"
+                    description: 'IpPoolRef is a reference to an existing IpPool object
+                      in Harvester''s cluster. This field is mutually exclusive with
+                      "ipPool". TODO: To be implemented'
                    type: string
                  ipamType:
                    description: IPAMType is the configuration of IP addressing for
                      the control plane load balancer. This can take two values, either
-                      "dhcp" or "ippool"
+                      "dhcp" or "ippool".
                    enum:
                    - dhcp
                    - pool
@@ -128,25 +130,25 @@
                      on the load balancer.
                    items:
                      description: Listener is a description of a new Listener to
-                        be created on the Load Balancer
+                        be created on the Load Balancer.
                      properties:
                        backendPort:
                          description: TargetPort is the port that the listener should
-                            forward traffic to
+                            forward traffic to.
                          format: int32
                          type: integer
                        name:
-                          description: Name is the name of the listener
+                          description: Name is the name of the listener.
                          type: string
                        port:
                          description: Port is the port that the listener should listen
-                            on
+                            on.
                          format: int32
                          type: integer
                        protocol:
                          default: TCP
                          description: Protocol is the protocol that the listener
-                            should use, either TCP or UDP
+                            should use, either TCP or UDP.
                          enum:
                          - TCP
                          - UDP
@@ -166,7 +168,7 @@
                type: string
              targetNamespace:
                description: TargetNamespace is the namespace on the Harvester cluster
-                  where VMs, Load Balancers, etc. should be created
+                  where VMs, Load Balancers, etc. should be created.
                type: string
            required:
            - identitySecret
@@ -174,7 +176,7 @@
            - targetNamespace
            type: object
          status:
-            description: HarvesterClusterStatus defines the observed state of HarvesterCluster
+            description: HarvesterClusterStatus defines the observed state of HarvesterCluster.
            properties:
              conditions:
                description: Conditions defines current service state of the Harvester
@@ -224,15 +226,15 @@
                type: array
              failureMessage:
                description: FailureMessage is a full error message dump of the above
-                  failureReason
+                  failureReason.
                type: string
              failureReason:
                description: FailureReason is the short name for the reason why a
-                  failure might be happening that makes the cluster not ready
+                  failure might be happening that makes the cluster not ready.
                type: string
              ready:
-                description: Reddy describes if the Harvester Cluster can be considered
-                  ready for machine creation
+                description: Ready describes if the Harvester Cluster can be considered
+                  ready for machine creation.
                type: boolean
            required:
            - ready
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_harvestermachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_harvestermachines.yaml
index f3dbc7b..09f314b 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_harvestermachines.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_harvestermachines.yaml
@@ -18,7 +18,7 @@
    - name: v1alpha1
     schema:
       openAPIV3Schema:
-        description: HarvesterMachine is the Schema for the harvestermachines API
+        description: HarvesterMachine is the Schema for the harvestermachines API.
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation
@@ -33,7 +33,7 @@
          metadata:
            type: object
          spec:
-            description: HarvesterMachineSpec defines the desired state of HarvesterMachine
+            description: HarvesterMachineSpec defines the desired state of HarvesterMachine.
            properties:
              cpu:
                description: CPU is the number of CPU to assign to the VM.
@@ -44,11 +44,12 @@
                type: string
              memory:
                description: Memory is the memory size to assign to the VM (should
-                  be similar to pod.spec.containers.resources.limits)
+                  be similar to pod.spec.containers.resources.limits).
                type: string
              networks:
-                description: Networks is a list of Networks to attach to the VM. Networks
-                  are referenced by their names.
+                description: Networks is a list of Networks to attach to the VM. Each
+                  item in the list can have the format "namespace/name" or just "name"
+                  if the object is in the same namespace as the HarvesterMachine.
                items:
                  type: string
                type: array
@@ -250,13 +251,15 @@
                x-kubernetes-map-type: atomic
                type: object
              providerID:
-                description: ProviderID will be the ID of the machine used by the
-                  controller. This will be "-"
+                description: ProviderID will be the ID of the VM in the provider (Harvester).
+                  This is set by the Cloud provider on the Workload cluster node and
+                  replicated by CAPI.
                type: string
              sshKeyPair:
                description: SSHKeyPair is the name of the SSH key pair to use for
-                  SSH access to the VM (this keyPair should be created in Harvester)
+                  SSH access to the VM (this keyPair should be created in Harvester).
+                  The reference can be in the format "namespace/name" or just "name"
+                  if the object is in the same namespace as the HarvesterMachine.
                type: string
              sshUser:
                description: SSHUser is the user that should be used to connect to
@@ -268,12 +271,15 @@
                  properties:
                    bootOrder:
                      description: BootOrder is an integer that determines the order
-                        of priority of volumes for booting the VM If absent, the sequence
-                        with which volumes appear in the manifest will be used.
+                        of priority of volumes for booting the VM. If absent, the
+                        sequence with which volumes appear in the manifest will be
+                        used.
                      type: integer
                    imageName:
                      description: ImageName is the name of the image to use if the
-                        volumeType is "image"
+                        volumeType is "image". ImageName can be in the format "namespace/name"
+                        or just "name" if the object is in the same namespace as the
+                        HarvesterMachine.
                      type: string
                    storageClass:
                      description: StorageClass is the name of the storage class to
@@ -598,7 +604,7 @@
            - volumes
            type: object
          status:
-            description: HarvesterMachineStatus defines the observed state of HarvesterMachine
+            description: HarvesterMachineStatus defines the observed state of HarvesterMachine.
            properties:
              addresses:
                items:
@@ -668,8 +674,6 @@
              ready:
                description: Ready is true when the provider resource is ready.
                type: boolean
-            required:
-            - ready
            type: object
        type: object
    served: true
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_harvestermachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_harvestermachinetemplates.yaml
index 8277242..23a208e 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_harvestermachinetemplates.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_harvestermachinetemplates.yaml
@@ -19,7 +19,7 @@
    schema:
      openAPIV3Schema:
        description: HarvesterMachineTemplate is the Schema for the harvestermachinetemplates
-          API
+          API.
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation
@@ -35,7 +35,7 @@
            type: object
          spec:
            description: HarvesterMachineTemplateSpec defines the desired state of
-              HarvesterMachineTemplate
+              HarvesterMachineTemplate.
            properties:
              template:
                description: Template is the HarvesterMachineTemplate template
@@ -53,11 +53,13 @@
                    type: string
                  memory:
                    description: Memory is the memory size to assign to the VM
-                      (should be similar to pod.spec.containers.resources.limits)
+                      (should be similar to pod.spec.containers.resources.limits).
                    type: string
                  networks:
                    description: Networks is a list of Networks to attach to the
-                      VM. Networks are referenced by their names.
+                      VM. Each item in the list can have the format "namespace/name"
+                      or just "name" if the object is in the same namespace as
+                      the HarvesterMachine.
                    items:
                      type: string
                    type: array
@@ -279,14 +281,16 @@
                    x-kubernetes-map-type: atomic
                    type: object
                  providerID:
-                    description: ProviderID will be the ID of the machine used
-                      by the controller. This will be "-"
+                    description: ProviderID will be the ID of the VM in the provider
+                      (Harvester). This is set by the Cloud provider on the Workload
+                      cluster node and replicated by CAPI.
                    type: string
                  sshKeyPair:
                    description: SSHKeyPair is the name of the SSH key pair to
                      use for SSH access to the VM (this keyPair should be created
-                      in Harvester)
+                      in Harvester). The reference can be in the format "namespace/name"
+                      or just "name" if the object is in the same namespace as
+                      the HarvesterMachine.
                    type: string
                  sshUser:
                    description: SSHUser is the user that should be used to connect
@@ -299,13 +303,15 @@
                      properties:
                        bootOrder:
                          description: BootOrder is an integer that determines
-                            the order of priority of volumes for booting the VM
+                            the order of priority of volumes for booting the VM.
                            If absent, the sequence with which volumes appear in
                            the manifest will be used.
                          type: integer
                        imageName:
                          description: ImageName is the name of the image to use
-                            if the volumeType is "image"
+                            if the volumeType is "image". ImageName can be in the
+                            format "namespace/name" or just "name" if the object
+                            is in the same namespace as the HarvesterMachine.
                          type: string
                        storageClass:
                          description: StorageClass is the name of the storage
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_infraclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_infraclustertemplates.yaml
index be89b06..9554cc5 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_infraclustertemplates.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_infraclustertemplates.yaml
@@ -44,7 +44,7 @@
          template:
            properties:
              metadata:
-                description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata'
+                description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.'
                properties:
                  annotations:
                    additionalProperties:
@@ -66,7 +66,7 @@
                type: object
              spec:
                description: HarvesterClusterSpec defines the desired state of
-                  HarvesterCluster
+                  HarvesterCluster.
                properties:
                  controlPlaneEndpoint:
                    description: ControlPlaneEndpoint represents the endpoint
@@ -89,11 +89,11 @@
                    properties:
                      name:
                        description: Name is the name of the required Identity
-                          Secret
+                          Secret.
                        type: string
                      namespace:
                        description: Namespace is the namespace in which the required
-                          Identity Secret should be found
+                          Identity Secret should be found.
                        type: string
                    required:
                    - name
@@ -110,22 +110,24 @@
                      ipPool:
                        description: IpPool defines a new IpPool that will be
                          added to Harvester. This field is mutually exclusive
-                          with "IpPoolRef"
+                          with "IpPoolRef".
                        properties:
                          gateway:
                            description: Gateway is the IP Address that should
                              be used by the Gateway on the Subnet. It should
-                              be a valid address inside the subnet e.g. 172.17.1.1
+                              be a valid address inside the subnet. e.g. 172.17.1.1.
                            type: string
                          subnet:
                            description: Subnet is a string describing the subnet
                              that should be used by the IP Pool, it should have
-                              the CIDR Format of an IPv4 Address e.g. 172.17.1.0/24
+                              the CIDR Format of an IPv4 Address. e.g. 172.17.1.0/24.
                            type: string
                          vmNetwork:
                            description: VMNetwork is the name of an existing
                              VM Network in Harvester where the IPPool should
-                              exist.
+                              exist. The reference can have the format "namespace/name"
+                              or just "name" if the object is in the same namespace
+                              as the HarvesterCluster.
                            type: string
                        required:
                        - gateway
@@ -133,14 +135,14 @@
                        - vmNetwork
                        type: object
                      ipPoolRef:
-                        description: IpPoolRef is a reference to an existing IpPool
-                          object in Harvester's cluster in the same namespace.
-                          This field is mutually exclusive with "ipPool"
+                        description: 'IpPoolRef is a reference to an existing
+                          IpPool object in Harvester''s cluster. This field is
+                          mutually exclusive with "ipPool". TODO: To be implemented'
                        type: string
                      ipamType:
                        description: IPAMType is the configuration of IP addressing
                          for the control plane load balancer. This can take two
-                          values, either "dhcp" or "ippool"
+                          values, either "dhcp" or "ippool".
                        enum:
                        - dhcp
                        - pool
@@ -150,25 +152,25 @@
                          be created on the load balancer.
                        items:
                          description: Listener is a description of a new Listener
-                            to be created on the Load Balancer
+                            to be created on the Load Balancer.
                          properties:
                            backendPort:
                              description: TargetPort is the port that the listener
-                                should forward traffic to
+                                should forward traffic to.
                              format: int32
                              type: integer
                            name:
-                              description: Name is the name of the listener
+                              description: Name is the name of the listener.
                              type: string
                            port:
                              description: Port is the port that the listener
-                                should listen on
+                                should listen on.
                              format: int32
                              type: integer
                            protocol:
                              default: TCP
                              description: Protocol is the protocol that the listener
-                                should use, either TCP or UDP
+                                should use, either TCP or UDP.
                              enum:
                              - TCP
                              - UDP
@@ -188,7 +190,7 @@
                    type: string
                  targetNamespace:
                    description: TargetNamespace is the namespace on the Harvester
-                      cluster where VMs, Load Balancers, etc. should be created
+                      cluster where VMs, Load Balancers, etc. should be created.
                    type: string
                required:
                - identitySecret
diff --git a/controllers/harvestercluster_controller.go b/controllers/harvestercluster_controller.go
index 1bcd29f..29a92ba 100644
--- a/controllers/harvestercluster_controller.go
+++ b/controllers/harvestercluster_controller.go
@@ -19,8 +19,11 @@ package controllers
 import (
 	"context"
 	"fmt"
+	"net"
 	"time"
 
+	current "github.com/containernetworking/cni/pkg/types/100"
+	"github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator"
 	"github.com/go-logr/logr"
 	lbv1beta1 "github.com/harvester/harvester-load-balancer/pkg/apis/loadbalancer.harvesterhci.io/v1beta1"
 	"github.com/pkg/errors"
@@ -100,7 +103,7 @@ func (r *HarvesterClusterReconciler) Reconcile(ctx context.Context, req ctrl.Req
 	var cluster infrav1.HarvesterCluster
 	if err := r.Get(ctx, req.NamespacedName, &cluster); err != nil {
 		if apierrors.IsNotFound(err) {
-			logger.Error(err, "cluster not found", "cluster-name", req.NamespacedName.Name, "cluster-namespace", req.NamespacedName.Namespace)
+			logger.Info("cluster not found", "cluster-name", req.NamespacedName.Name, "cluster-namespace", req.NamespacedName.Namespace)
 
 			return ctrl.Result{}, nil
 		}
@@ -277,8 +280,7 @@ func (r *HarvesterClusterReconciler) ReconcileNormal(scope ClusterScope) (res ct
 	}
 
 	if len(ownedCPHarvesterMachines) == 0 {
-		logger.Info("no ControlPlane Machines exist yet for cluster, skipping Load Balancer creation and Requeing ...")
-
+		lbNamespacedName := scope.HarvesterCluster.Spec.TargetNamespace + "/" + scope.HarvesterCluster.Namespace + "-" + scope.HarvesterCluster.Name + "-lb"
 		// Create a placeholder LoadBalancer svc to avoid blocking the CAPI Controller
 		existingPlaceholderLB, err1 := scope.HarvesterClient.CoreV1().Services(scope.HarvesterCluster.Spec.TargetNamespace).Get(
 			scope.Ctx,
@@ -291,6 +293,15 @@ func (r *HarvesterClusterReconciler) ReconcileNormal(scope ClusterScope) (res ct
 
 			return ctrl.Result{RequeueAfter: requeueTimeFiveMinutes}, err1
 		} else {
+			lbIP := "0.0.0.0"
+			if scope.HarvesterCluster.Spec.LoadBalancerConfig.IPAMType == infrav1.POOL {
+				lbIP, err = getIPFromIPPool(scope, lbNamespacedName)
+				if err != nil {
+					logger.Error(err, "could not get IP from IP Pool")
+
+					return ctrl.Result{RequeueAfter: requeueTimeThirtySeconds}, err
+				}
+			}
 			placeholderSVC := &apiv1.Service{
 				ObjectMeta: v1.ObjectMeta{
 					Name: scope.HarvesterCluster.Namespace + "-" + scope.HarvesterCluster.Name + "-lb",
@@ -318,7 +329,7 @@ func (r *HarvesterClusterReconciler) ReconcileNormal(scope ClusterScope) (res ct
 
 					return &policy
 				}(),
-				LoadBalancerIP: "0.0.0.0",
+				LoadBalancerIP: lbIP,
 			},
 		}
 
@@ -345,7 +356,35 @@ func (r *HarvesterClusterReconciler) ReconcileNormal(scope ClusterScope) (res ct
 		if len(existingPlaceholderLB.Status.LoadBalancer.Ingress) == 0 || existingPlaceholderLB.Status.LoadBalancer.Ingress[0].IP == "" {
 			logger.Info("placeholder LoadBalancer IP is empty, waiting for IP to be set ...")
 
-			return ctrl.Result{RequeueAfter: requeueTimeThirtySeconds}, nil
+			if scope.HarvesterCluster.Spec.LoadBalancerConfig.IPAMType == infrav1.POOL {
+				ipPool, err := scope.HarvesterClient.LoadbalancerV1beta1().IPPools().Get(context.TODO(),
+					scope.HarvesterCluster.Spec.LoadBalancerConfig.IpPoolRef, v1.GetOptions{})
+				if err != nil {
+					return ctrl.Result{RequeueAfter: requeueTimeThirtySeconds}, err
+				}
+
+				if ipPool.Status.AllocatedHistory == nil {
+					return ctrl.Result{RequeueAfter: requeueTimeThirtySeconds}, fmt.Errorf("IP Pool %s does not have any allocated IPs", ipPool.Name)
+				}
+
+				for k, v := range ipPool.Status.AllocatedHistory {
+					if lbNamespacedName == v {
+						existingPlaceholderLB.Spec.LoadBalancerIP = k
+
+						_, err = scope.HarvesterClient.CoreV1().Services(scope.HarvesterCluster.Spec.TargetNamespace).Update(context.TODO(),
+							existingPlaceholderLB, v1.UpdateOptions{})
+						if err != nil {
+							err = errors.Wrap(err, "could not update the placeholder LoadBalancer")
+
+							return ctrl.Result{RequeueAfter: requeueTimeThirtySeconds}, err
+						}
+					}
+				}
+
+				return ctrl.Result{RequeueAfter: requeueTimeThirtySeconds}, nil
+			} else {
+				return ctrl.Result{RequeueAfter: requeueTimeThirtySeconds}, nil
+			}
 		}
 
 		// res = ctrl.Result{RequeueAfter: 5 * time.Minute}
@@ -356,6 +395,7 @@ func (r *HarvesterClusterReconciler) ReconcileNormal(scope ClusterScope) (res ct
 		scope.HarvesterCluster.Status = infrav1.HarvesterClusterStatus{
 			Ready: true,
 		}
+		res = ctrl.Result{RequeueAfter: 1 * time.Minute}
 
 		return res, err
 	}
@@ -366,14 +406,14 @@ func (r *HarvesterClusterReconciler) ReconcileNormal(scope ClusterScope) (res ct
 	if err != nil {
 		logger.V(1).Info("could not create the LoadBalancer, requeuing ...")
 
-		return ctrl.Result{RequeueAfter: 5 * time.Minute}, nil
+		return ctrl.Result{RequeueAfter: 1 * time.Minute}, nil
 	}
 
 	lbIP, err := getLoadBalancerIP(scope.HarvesterCluster, scope.HarvesterClient)
 	if err != nil {
-		logger.Error(err, "could not get the LoadBalancer IP")
+		logger.Info("LoadBalancer IP is not yet available, requeuing ...")
 
-		return ctrl.Result{RequeueAfter: requeueTimeThirtySeconds}, err
+		return ctrl.Result{RequeueAfter: requeueTimeThirtySeconds}, nil
 	}
 
 	scope.HarvesterCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{
@@ -387,12 +427,87 @@ func (r *HarvesterClusterReconciler) ReconcileNormal(scope ClusterScope) (res ct
 		Status: apiv1.ConditionTrue,
 	})
 
-	return ctrl.Result{RequeueAfter: 5 * time.Minute}, nil
+	return ctrl.Result{RequeueAfter: 1 * time.Minute}, nil
 	}
 
 	return res, err
 }
 
+func getIPFromIPPool(scope ClusterScope, lbNamespacedName string) (string, error) {
+	poolRef := scope.HarvesterCluster.Spec.LoadBalancerConfig.IpPoolRef
+
+	if poolRef == "" {
+		return "", fmt.Errorf("IP Pool reference is empty, while IPAMType is set to %s", infrav1.POOL)
+	}
+
+	referencedIPPool, err := scope.HarvesterClient.LoadbalancerV1beta1().IPPools().Get(
+		context.TODO(),
+		poolRef,
+		v1.GetOptions{})
+	if err != nil {
+		return "", errors.Wrapf(err, "could not get referenced IP Pool %s", poolRef)
+	}
+
+	if referencedIPPool.Status.Available == 0 {
+		return "", fmt.Errorf("IP Pool %s does not have available addresses", poolRef)
+	}
+
+	return allocateIPFromPool(referencedIPPool, lbNamespacedName, &scope)
+}
+
+func allocateIPFromPool(refPool *lbv1beta1.IPPool, lbNamespacedName string, scope *ClusterScope) (string, error) {
+	rangeSlice := make([]allocator.Range, 0)
+
+	var total int64
+
+	ranges := refPool.Spec.Ranges
+
+	for i := range ranges {
+		element, err := locutil.MakeRange(&ranges[i])
+		if err != nil {
+			return "", err
+		}
+
+		rangeSlice = append(rangeSlice, *element)
+
+		total += locutil.CountIP(element)
+	}
+
+	rangeSet := allocator.RangeSet(rangeSlice)
+
+	a := allocator.NewIPAllocator(&rangeSet, locutil.NewStore(refPool), 0)
+
+	var ipObj *current.IPConfig
+
+	var err error
+
+	// apply the IP allocated before in priority
+	if refPool.Status.AllocatedHistory != nil {
+		for k, v := range refPool.Status.AllocatedHistory {
+			if lbNamespacedName == v {
+				ipObj, err = a.Get(lbNamespacedName, "", net.ParseIP(k))
+				if err != nil {
+					return "", err
+				}
+			}
+		}
+	}
+
+	if ipObj == nil {
+		ipObj, err = a.Get(lbNamespacedName, "", nil)
+		if err != nil {
+			return "", err
+		}
+	}
+
+	// Update the Pool in Harvester with the newly allocated IP.
+	if _, err := scope.HarvesterClient.LoadbalancerV1beta1().IPPools().Update(context.TODO(), refPool, v1.UpdateOptions{}); err != nil {
+		return "", errors.Wrapf(err, "could not update IP Pool %s", refPool.Name)
+	}
+
+	return ipObj.Address.IP.String(), nil
+}
+
 func getLoadBalancerIP(harvesterCluster *infrav1.HarvesterCluster, hvClient *lbclient.Clientset) (string, error) {
 	createdLB, err := hvClient.LoadbalancerV1beta1().LoadBalancers(harvesterCluster.Spec.TargetNamespace).Get(
 		context.TODO(),
@@ -631,15 +744,28 @@ func (r *HarvesterClusterReconciler) getOwnedCPHarversterMachines(scope ClusterS
 		return []infrav1.HarvesterMachine{}, errors.Wrap(err, "unable to list owned ControlPlane Machines")
 	}
 
-	ownedCPHarvesterMachinesReady := make([]infrav1.HarvesterMachine, 0)
+	ownedCPMachinesExistInHarvester := make([]infrav1.HarvesterMachine, 0)
 
 	for _, machine := range ownedCPHarvesterMachines.Items {
-		if machine.Status.Ready {
-			ownedCPHarvesterMachinesReady = append(ownedCPHarvesterMachinesReady, machine)
+		hvMachineName := machine.Name
+		hvMachineNamespace := scope.HarvesterCluster.Spec.TargetNamespace
+
+		_, err := scope.HarvesterClient.KubevirtV1().VirtualMachines(hvMachineNamespace).Get(context.TODO(), hvMachineName, v1.GetOptions{})
+		if err != nil {
+			if apierrors.IsNotFound(err) {
+				scope.Logger.V(4).Info("Owned ControlPlane Machine does not exist in Harvester yet", "machine-name",
+					hvMachineName, "machine-namespace", hvMachineNamespace)
+
+				continue
+			}
+
+			return []infrav1.HarvesterMachine{}, errors.Wrap(err, "unable to get VM from Harvester")
+		}
+
+		ownedCPMachinesExistInHarvester = append(ownedCPMachinesExistInHarvester, machine)
 	}
 
-	return ownedCPHarvesterMachinesReady, nil
+	return ownedCPMachinesExistInHarvester, nil
 }
 
-// ReconcileDelete is the part of the Reconcialiation that deletes a HarvesterCluster and everything which depends on it.
+// ReconcileDelete is the part of the Reconciliation that deletes a HarvesterCluster and everything which depends on it.
diff --git a/controllers/harvestermachine_controller.go b/controllers/harvestermachine_controller.go
index 690ac4b..90ebb28 100644
--- a/controllers/harvestermachine_controller.go
+++ b/controllers/harvestermachine_controller.go
@@ -96,7 +96,7 @@ func (r *HarvesterMachineReconciler) Reconcile(ctx context.Context, req ctrl.Req
 	hvMachine := &infrav1.HarvesterMachine{}
 	if err := r.Get(ctx, req.NamespacedName, hvMachine); err != nil {
 		if apierrors.IsNotFound(err) {
-			logger.Error(err, "harvestermachine not found")
+			logger.Info("harvestermachine not found")
 
 			return ctrl.Result{}, nil
 		}
@@ -136,7 +136,7 @@
 	if ownerMachine == nil {
 		logger.Info("Waiting for Machine Controller to set OwnerRef on HarvesterMachine")
 
-		return ctrl.Result{}, nil
+		return ctrl.Result{RequeueAfter: requeueTimeThirtySeconds}, nil
 	}
 
 	ownerCluster, err := util.GetClusterFromMetadata(ctx, r.Client, ownerMachine.ObjectMeta)
@@ -195,7 +195,7 @@
 	if !hvMachine.DeletionTimestamp.IsZero() {
 		return r.ReconcileDelete(hvScope)
 	} else {
-		return r.ReconcileNormal(hvScope) //nolint:contextcheck
+		return r.ReconcileNormal(&hvScope) //nolint:contextcheck
 	}
 }
 
@@ -221,13 +221,15 @@ func (r *HarvesterMachineReconciler) SetupWithManager(ctx context.Context, mgr c
 		Complete(r)
 }
 
-func (r *HarvesterMachineReconciler) ReconcileNormal(hvScope Scope) (res reconcile.Result, rerr error) {
+func (r *HarvesterMachineReconciler) ReconcileNormal(hvScope *Scope) (res reconcile.Result, rerr error) {
 	logger := log.FromContext(hvScope.Ctx)
 
 	// Return early if the object or Cluster is paused.
 	if annotations.IsPaused(hvScope.Cluster, hvScope.HarvesterMachine) {
 		logger.Info("Reconciliation is paused for this object")
 
+		hvScope.HarvesterMachine.Status.Ready = false
+
 		return ctrl.Result{}, nil
 	}
 
@@ -255,8 +257,11 @@ func (r *HarvesterMachineReconciler) ReconcileNormal(hvScope Scope) (res reconci
 		hvScope.HarvesterMachine.Status.Ready = false
 
-		return ctrl.Result{}, nil
+		return ctrl.Result{RequeueAfter: 1 * time.Minute}, nil
 	}
+
+	vmExists := false
+
 	// check if Harvester has a machine with the same name and namespace
 	existingVM, err := hvScope.HarvesterClient.KubevirtV1().VirtualMachines(hvScope.HarvesterCluster.Spec.TargetNamespace).Get(
 		context.TODO(), hvScope.HarvesterMachine.Name, metav1.GetOptions{})
@@ -269,49 +274,54 @@ func (r *HarvesterMachineReconciler) ReconcileNormal(hvScope Scope) (res reconci
 	}
 
 	if (existingVM != nil) && (existingVM.Name == hvScope.HarvesterMachine.Name) {
-		logger.Info("VM " + existingVM.Namespace + "/" + existingVM.Name + " already exists in Harvester, updating IP addresses.")
+		vmExists = true
 
 		if *existingVM.Spec.Running {
 			ipAddresses, err := getIPAddressesFromVMI(existingVM, hvScope.HarvesterClient)
 			if err != nil {
+				hvScope.HarvesterMachine.Status.Ready = false
+
 				if apierrors.IsNotFound(err) {
 					logger.Info("VM is not running yet, waiting for it to be ready")
 
 					return ctrl.Result{RequeueAfter: 1 * time.Minute}, nil
 				}
 
-				logger.V(1).Info("unable to get IP addresses from VMI in Harvester, requeuing in 1 minute ...")
+				logger.V(1).Info("unable to get IP addresses from VMI in Harvester, requeuing ...")
 
-				return ctrl.Result{RequeueAfter: 1 * time.Minute}, err
+				return ctrl.Result{RequeueAfter: 1 * time.Minute}, nil
 			}
 
 			hvScope.HarvesterMachine.Status.Addresses = ipAddresses
 
-			if len(ipAddresses) > 0 {
-				hvScope.HarvesterMachine.Status.Ready = true
-			} else {
+			hvScope.HarvesterMachine.Status.Ready = false
+
+			if len(ipAddresses) == 0 {
 				hvScope.HarvesterMachine.Status.Ready = false
 
 				return ctrl.Result{RequeueAfter: 1 * time.Minute}, nil
 			}
-		}
 
-		if hvScope.HarvesterMachine.Spec.ProviderID == "" {
-			providerID, err := getProviderIDFromWorkloadCluster(hvScope, existingVM)
-			if err != nil {
-				logger.Info("ProviderID not set on Node in workload cluster, setting a new one in harvesterMachine")
-			}
+			if hvScope.HarvesterMachine.Spec.ProviderID == "" {
+				providerID, _ := getProviderIDFromWorkloadCluster(hvScope, existingVM)
 
-			if providerID != "" {
-				hvScope.HarvesterMachine.Spec.ProviderID = providerID
-				hvScope.HarvesterMachine.Status.Ready = true
-			} else {
-				hvScope.HarvesterMachine.Status.Ready = false
+				if providerID != "" {
+					hvScope.HarvesterMachine.Spec.ProviderID = providerID
+					hvScope.HarvesterMachine.Status.Ready = true
+				} else {
+					logger.Info("Waiting for ProviderID to be set on Node resource in Workload Cluster ...")
+					hvScope.HarvesterMachine.Status.Ready = false
 
-				return ctrl.Result{RequeueAfter: 5 * time.Minute}, nil
+					return ctrl.Result{RequeueAfter: 1 * time.Minute}, nil
+				}
+			} else {
+				conditions.MarkTrue(hvScope.HarvesterMachine, infrav1.MachineCreatedCondition)
+				hvScope.HarvesterMachine.Status.Ready = true
 			}
-		}
+		} else {
+			hvScope.HarvesterMachine.Status.Ready = false
 
-		return ctrl.Result{}, nil
+			return ctrl.Result{RequeueAfter: requeueTimeThirtySeconds}, nil
+		}
 	}
 
 	if !conditions.IsTrue(hvScope.HarvesterMachine, infrav1.MachineCreatedCondition) {
@@ -327,20 +337,20 @@ func (r *HarvesterMachineReconciler) ReconcileNormal(hvScope Scope) (res reconci
 		}
 
 		conditions.MarkTrue(hvScope.HarvesterMachine, infrav1.MachineCreatedCondition)
-		hvScope.HarvesterMachine.Status.Ready = true
+		hvScope.HarvesterMachine.Status.Ready = false
 
 		// Patch the HarvesterCluster resource with the InitMachineCreatedCondition if it is not already set.
-		if conditions.IsFalse(hvScope.HarvesterCluster, infrav1.InitMachineCreatedCondition) {
+		if !conditions.IsTrue(hvScope.HarvesterCluster, infrav1.InitMachineCreatedCondition) {
 			hvClusterCopy := hvScope.HarvesterCluster.DeepCopy()
 			conditions.MarkTrue(hvClusterCopy, infrav1.InitMachineCreatedCondition)
-			hvClusterCopy.Status.Ready = false
+			hvClusterCopy.Status.Ready = hvScope.HarvesterCluster.Status.Ready
 
 			if err := r.Client.Status().Patch(hvScope.Ctx, hvClusterCopy, client.MergeFrom(hvScope.HarvesterCluster)); err != nil {
 				logger.Error(err, "failed to update HarvesterCluster Conditions with InitMachineCreatedCondition")
 			}
 		}
 	} else {
-		if (existingVM == &kubevirtv1.VirtualMachine{}) {
+		if !vmExists {
 			hvScope.HarvesterMachine.Status.Ready = false
 			conditions.MarkFalse(hvScope.HarvesterMachine, infrav1.MachineCreatedCondition,
 				infrav1.MachineNotFoundReason, clusterv1.ConditionSeverityError, "VM not found in Harvester")
@@ -349,10 +359,12 @@ func (r *HarvesterMachineReconciler) ReconcileNormal(hvScope Scope) (res reconci
 		}
 	}
 
-	return ctrl.Result{RequeueAfter: 1 * time.Minute}, nil
+	hvScope.HarvesterMachine.Status.Ready = true
+
+	return ctrl.Result{}, nil
 }
 
-func getProviderIDFromWorkloadCluster(hvScope Scope, existingVM *kubevirtv1.VirtualMachine) (string, error) {
+func getProviderIDFromWorkloadCluster(hvScope *Scope, existingVM *kubevirtv1.VirtualMachine) (string, error) {
 	var workloadConfig *rest.Config
 
 	workloadConfig, err := getWorkloadClusterConfig(hvScope)
@@ -382,7 +394,7 @@ func getProviderIDFromWorkloadCluster(hvScope Scope, existingVM *kubevirtv1.Virt
 }
 
 // getWorkloadClusterConfig returns a rest.Config for the workload cluster from a secret in the management cluster.
-func getWorkloadClusterConfig(hvScope Scope) (*rest.Config, error) {
+func getWorkloadClusterConfig(hvScope *Scope) (*rest.Config, error) {
 	// Get the workload cluster kubeconfig secret
 	workloadClusterKubeconfigSecret := &v1.Secret{}
 
@@ -414,10 +426,9 @@ func getIPAddressesFromVMI(existingVM *kubevirtv1.VirtualMachine, hvClient *harv
 	vmInstance, err := hvClient.KubevirtV1().VirtualMachineInstances(existingVM.Namespace).Get(context.TODO(), existingVM.Name, metav1.GetOptions{})
 	if err != nil {
-		if apierrors.IsNotFound(err) {
-			return ipAddresses, fmt.Errorf("no VM instance found for VM %s", existingVM.Name)
-		}
-
+		// if apierrors.IsNotFound(err) {
+		// 	return ipAddresses, fmt.Errorf("no VM instance found for VM %s", existingVM.Name)
+		// }
 		return ipAddresses, err
 	}
 
@@ -431,13 +442,17 @@
 	return ipAddresses, nil
 }
 
-func createVMFromHarvesterMachine(hvScope Scope) (*kubevirtv1.VirtualMachine, error) {
+func createVMFromHarvesterMachine(hvScope *Scope) (*kubevirtv1.VirtualMachine, error) {
 	var err error
 
 	vmLabels := map[string]string{
 		"harvesterhci.io/creator": "harvester",
-		cpVMLabelKey:              cpVMLabelValuePrefix + "-" + hvScope.Cluster.Name,
 	}
+
+	if _, ok := hvScope.HarvesterMachine.Labels[clusterv1.MachineControlPlaneLabel]; ok {
+		vmLabels[cpVMLabelKey] = cpVMLabelValuePrefix + "-" + hvScope.Cluster.Name
+	}
+
 	vmiLabels := vmLabels
 
 	vmName := hvScope.HarvesterMachine.Name
@@ -538,7 +553,7 @@
 	return string(pvcJsonString), nil
 }
 
-func getImageFromHarvesterMachine(imageVolumes []infrav1.Volume, hvScope Scope) (image *harvesterv1beta1.VirtualMachineImage, err error) {
+func getImageFromHarvesterMachine(imageVolumes []infrav1.Volume, hvScope *Scope) (image *harvesterv1beta1.VirtualMachineImage, err error) {
 	vmImageNamespacedString := imageVolumes[0].ImageName
 
 	err, vmImageNamespacedName := locutil.GetNamespacedName(vmImageNamespacedString, hvScope.HarvesterCluster.Spec.TargetNamespace)
@@ -568,7 +583,7 @@
 }
 
 // buildVMTemplate creates a *kubevirtv1.VirtualMachineInstanceTemplateSpec from the CLI Flags and some computed values.
-func buildVMTemplate(hvScope Scope,
+func buildVMTemplate(hvScope *Scope,
 	pvcName string, vmiLabels map[string]string,
 ) (vmTemplate *kubevirtv1.VirtualMachineInstanceTemplateSpec, err error) {
 	var sshKey *harvesterv1beta1.KeyPair
@@ -791,7 +806,7 @@
 	return networks
 }
 
-func getCloudInitData(hvScope Scope) (string, error) {
+func getCloudInitData(hvScope *Scope) (string, error) {
 	dataSecretNamespacedName := types.NamespacedName{
 		Namespace: hvScope.Machine.Namespace,
 		Name:      *hvScope.Machine.Spec.Bootstrap.DataSecretName,
diff --git a/go.mod b/go.mod
index 1a0ee3a..a13bbec 100644
--- a/go.mod
+++ b/go.mod
@@ -4,6 +4,8 @@ go 1.20
 
 require (
 	emperror.dev/errors v0.8.0
+	github.com/containernetworking/cni v1.1.2
+	github.com/containernetworking/plugins v1.1.1
 	github.com/harvester/harvester v1.2.1
 	github.com/harvester/harvester-load-balancer v0.2.3
 	github.com/k8snetworkplumbingwg/network-attachment-definition-client v0.0.0-20200331171230-d50e42f2b669
@@ -25,10 +27,14 @@
 )
 
 require (
+	github.com/coreos/go-iptables v0.6.0 // indirect
 	github.com/google/gxui v0.0.0-20151028112939-f85e0a97b3a4 // indirect
 	github.com/nxadm/tail v1.4.8 // indirect
 	github.com/rancher/eks-operator v1.1.5 // indirect
 	github.com/rancher/gke-operator v1.1.4 // indirect
+	github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1 // indirect
+	github.com/vishvananda/netlink v1.2.1-beta.2 // indirect
+	github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 // indirect
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
 )
 
diff --git a/go.sum b/go.sum
index 9baa27a..1315187 100644
--- a/go.sum
+++ b/go.sum
@@ -129,11 +129,17 @@ github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h
 github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
 github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
 github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ=
+github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
+github.com/containernetworking/plugins v1.1.1 h1:+AGfFigZ5TiQH00vhR8qPeSatj53eNGz0C1d3wVYlHE=
+github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8=
 github.com/coredns/caddy v1.1.0 h1:ezvsPrT/tA/7pYDBZxu0cT0VmWk75AfIaf6GSYCNMf0=
 github.com/coredns/corefile-migration v1.0.21 h1:W/DCETrHDiFo0Wj03EyMkaQ9fwsmSgqTCQDHpceaSsE=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-iptables v0.6.0 h1:is9qnZMPYjLd8LYqmm/qlE+wwEgJIkTYdhV3rfZo4jk=
+github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
 github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
 github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
 github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@@ -685,6 +691,8 @@ github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1 h1:ZFfeKAhIQiiOrQaI3/znw0gOmYpO28Tcu1YaqMa/jtQ=
+github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
 github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
 github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
@@ -750,6 +758,11 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1
 github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
 github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
 github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
+github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs=
+github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
+github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 h1:gga7acRE695APm9hlsSMoOoE65U4/TcqNj90mc69Rlg=
+github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
 github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
 github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
 github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
@@ -1029,6 +1042,7 @@ golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1039,6 +1053,7 @@ golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/util/ippool.go b/util/ippool.go
new file mode 100644
index 0000000..f36a605
--- /dev/null
+++ b/util/ippool.go
@@ -0,0 +1,245 @@
+package util
+
+import (
+	"fmt"
+	"math/big"
+	"net"
+	"net/netip"
+
+	"github.com/containernetworking/cni/pkg/types"
+	cnip "github.com/containernetworking/plugins/pkg/ip"
+	"github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator"
+	lbv1beta1 "github.com/harvester/harvester-load-balancer/pkg/apis/loadbalancer.harvesterhci.io/v1beta1"
+)
+
+const (
+	initialCapacity = 10
+	p2pMaskStr      = "ffffffff"
+)
+
+// Store implements the backend.Store interface.
+type Store struct {
+	// IPPool is the pool of IP addresses.
+	*lbv1beta1.IPPool
+}
+
+// NewStore creates a new Store backed by the given IPPool.
+func NewStore(pool *lbv1beta1.IPPool) *Store {
+	return &Store{
+		IPPool: pool,
+	}
+}
+
+// Lock locks the store.
+func (s *Store) Lock() error {
+	return nil
+}
+
+func (s *Store) Unlock() error {
+	return nil
+}
+
+func (s *Store) Close() error {
+	return nil
+}
+
+func (s *Store) Reserve(id, _ string, ip net.IP, _ string) (bool, error) {
+	ipStr := ip.String()
+
+	// return false if the IP is already reserved
+	if s.IPPool.Status.Allocated != nil {
+		if _, ok := s.IPPool.Status.Allocated[ipStr]; ok {
+			return false, nil
+		}
+	}
+
+	if s.IPPool.Status.Allocated == nil {
+		s.IPPool.Status.Allocated = make(map[string]string)
+	}
+
+	// Record the reservation so that the caller can persist it back to Harvester.
+	s.IPPool.Status.Allocated[ipStr] = id
+	s.IPPool.Status.LastAllocated = ipStr
+	s.IPPool.Status.Available--
+
+	if s.IPPool.Status.AllocatedHistory != nil {
+		s.IPPool.Status.AllocatedHistory[ipStr] = id
+	}
+
+	return true, nil
+}
+
+func (s *Store) LastReservedIP(rangeID string) (net.IP, error) {
+	return net.ParseIP(s.IPPool.Status.LastAllocated), nil
+}
+
+func (s *Store) Release(ip net.IP) error {
+	if s.IPPool.Status.Allocated == nil {
+		return nil
+	}
+
+	ipStr := ip.String()
+
+	if s.IPPool.Status.AllocatedHistory == nil {
+		s.IPPool.Status.AllocatedHistory = make(map[string]string)
+	}
+
+	s.IPPool.Status.AllocatedHistory[ipStr] = s.IPPool.Status.Allocated[ipStr]
+
+	delete(s.IPPool.Status.Allocated, ipStr)
+	s.IPPool.Status.Available++
+
+	return nil
+}
+
+func (s *Store) ReleaseByID(id string, _ string) error {
+	if s.IPPool.Status.Allocated == nil {
+		return nil
+	}
+
+	for ip, applicant := range s.IPPool.Status.Allocated {
+		if applicant == id {
+			if s.IPPool.Status.AllocatedHistory == nil {
+				s.IPPool.Status.AllocatedHistory = make(map[string]string)
+			}
+
+			s.IPPool.Status.AllocatedHistory[ip] = applicant
+
+			delete(s.IPPool.Status.Allocated, ip)
+			s.IPPool.Status.Available++
+		}
+	}
+
+	return nil
+}
+
+func (s *Store) GetByID(id string, _ string) []net.IP {
+	ips := make([]net.IP, 0, initialCapacity)
+
+	for ip, applicant := range s.IPPool.Status.Allocated {
+		if id == applicant {
+			ips = append(ips, net.ParseIP(ip))
+		}
+	}
+
+	return ips
+}
+
+func MakeRange(r *lbv1beta1.Range) (*allocator.Range, error) {
+	ip, ipNet, err := net.ParseCIDR(r.Subnet)
+	if err != nil {
+		return nil, fmt.Errorf("invalid range %+v", r)
+	}
+
+	var defaultStart, defaultEnd, defaultGateway, start, end, gateway net.IP
+
+	mask := ipNet.Mask.String()
+	// If the subnet is a point to point IP
+	if mask == p2pMaskStr {
+		defaultStart = ip.To16()
+		defaultEnd = ip.To16()
+		defaultGateway = nil
+	} else {
+		// The rangeStart defaults to the `.1` IP inside the `subnet` block.
+		// The rangeEnd defaults to the `.254` IP inside the `subnet` block for IPv4, `.255` for IPv6.
+		// The gateway defaults to the `.1` IP inside the `subnet` block.
+		// Example:
+		//   subnet: 192.168.0.0/24
+		//   rangeStart: 192.168.0.1
+		//   rangeEnd: 192.168.0.254
+		//   gateway: 192.168.0.1
+		// The gateway will be skipped during allocation.
+		// The IPs are kept in the 16-byte representation, matching what net.ParseIP returns.
+		defaultStart = cnip.NextIP(ipNet.IP).To16()
+		defaultEnd = lastIP(*ipNet).To16()
+		defaultGateway = cnip.NextIP(ipNet.IP).To16()
+	}
+
+	start, err = parseIP(r.RangeStart, ipNet, defaultStart)
+	if err != nil {
+		return nil, fmt.Errorf("invalid range start %s: %w", r.RangeStart, err)
+	}
+
+	end, err = parseIP(r.RangeEnd, ipNet, defaultEnd)
+	if err != nil {
+		return nil, fmt.Errorf("invalid range end %s: %w", r.RangeEnd, err)
+	}
+
+	gateway, err = parseIP(r.Gateway, ipNet, defaultGateway)
+	if err != nil {
+		return nil, fmt.Errorf("invalid gateway %s: %w", r.Gateway, err)
+	}
+
+	// Ensure start IP is smaller than end IP
+	startAddr, _ := netip.AddrFromSlice(start)
+	endAddr, _ := netip.AddrFromSlice(end)
+
+	if startAddr.Compare(endAddr) > 0 {
+		start, end = end, start
+	}
+
+	return &allocator.Range{
+		RangeStart: start,
+		RangeEnd:   end,
+		Subnet:     types.IPNet(*ipNet),
+		Gateway:    gateway,
+	}, nil
+}
+
+func networkIP(n net.IPNet) net.IP {
+	return n.IP.Mask(n.Mask)
+}
+
+func parseIP(ipStr string, ipNet *net.IPNet, defaultIP net.IP) (net.IP, error) {
+	if ipStr == "" {
+		return defaultIP, nil
+	}
+
+	ip := net.ParseIP(ipStr)
+	if ip == nil {
+		return nil, fmt.Errorf("invalid IP %s", ipStr)
+	}
+
+	if !ipNet.Contains(ip) {
+		return nil, fmt.Errorf("IP %s is out of subnet %s", ipStr, ipNet.String())
+	}
+
+	if ip.Equal(networkIP(*ipNet)) {
+		return nil, fmt.Errorf("IP %s is the network address", ipStr)
+	}
+
+	if ip.Equal(broadcastIP(*ipNet)) {
+		return nil, fmt.Errorf("IP %s is the broadcast address", ipStr)
+	}
+
+	return ip, nil
+}
+
+func broadcastIP(n net.IPNet) net.IP {
+	broadcast := make(net.IP, len(n.IP))
+	for i := 0; i < len(n.IP); i++ {
+		broadcast[i] = n.IP[i] | ^n.Mask[i]
+	}
+
+	return broadcast
+}
+
+// Determine the last IP of a subnet, excluding the broadcast if IPv4
+func lastIP(subnet net.IPNet) net.IP {
+	var end net.IP
+	for i := 0; i < len(subnet.IP); i++ {
+		end = append(end, subnet.IP[i]|^subnet.Mask[i])
+	}
+
+	if subnet.IP.To4() != nil {
+		end[3]--
+	}
+
+	return end
+}
+
+func CountIP(r *allocator.Range) int64 {
+	count := big.NewInt(0).Add(big.NewInt(0).Sub(ipToInt(r.RangeEnd), ipToInt(r.RangeStart)), big.NewInt(1)).Int64()
+
+	if r.Gateway != nil && r.Contains(r.Gateway) {
+		count--
+	}
+
+	return count
+}
+
+func ipToInt(ip net.IP) *big.Int {
+	if v := ip.To4(); v != nil {
+		return big.NewInt(0).SetBytes(v)
+	}
+
+	return big.NewInt(0).SetBytes(ip.To16())
+}
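A note on how the new IPAM pieces fit together: the following is a minimal, self-contained sketch (not part of the patch) showing how util/ippool.go composes with the upstream host-local allocator, mirroring what allocateIPFromPool does in harvestercluster_controller.go. The pool contents are illustrative, and the locutil import path is an assumption, since the module path does not appear in this diff.

package main

import (
	"fmt"

	"github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator"
	lbv1beta1 "github.com/harvester/harvester-load-balancer/pkg/apis/loadbalancer.harvesterhci.io/v1beta1"

	// Assumed import path for this repository's util package.
	locutil "github.com/rancher-sandbox/cluster-api-provider-harvester/util"
)

func main() {
	// A pool with a single range; MakeRange fills in the defaults
	// (start .1, end .254, gateway .1 for an IPv4 /24).
	pool := &lbv1beta1.IPPool{}
	pool.Spec.Ranges = []lbv1beta1.Range{{Subnet: "172.17.1.0/24"}}

	rangeSlice := make([]allocator.Range, 0)

	var total int64

	for i := range pool.Spec.Ranges {
		element, err := locutil.MakeRange(&pool.Spec.Ranges[i])
		if err != nil {
			panic(err)
		}

		rangeSlice = append(rangeSlice, *element)
		// CountIP subtracts the gateway, so a /24 yields 253 usable addresses.
		total += locutil.CountIP(element)
	}

	rangeSet := allocator.RangeSet(rangeSlice)

	// The Store adapter turns the IPPool status into the allocator's backend,
	// so reservations are recorded on pool.Status.Allocated.
	a := allocator.NewIPAllocator(&rangeSet, locutil.NewStore(pool), 0)

	// Allocate an address for a load balancer identified as "namespace/name",
	// the same ID format the cluster controller uses for lbNamespacedName.
	ipConf, err := a.Get("default/test-cluster-lb", "", nil)
	if err != nil {
		panic(err)
	}

	fmt.Printf("allocated %s (%d usable addresses in pool)\n", ipConf.Address.IP, total)
}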
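The defaulting rules documented inside MakeRange can also be pinned down with a small test-style sketch (a hypothetical util/ippool_test.go, not part of the patch); it exercises only functions added above:

package util

import (
	"testing"

	lbv1beta1 "github.com/harvester/harvester-load-balancer/pkg/apis/loadbalancer.harvesterhci.io/v1beta1"
)

// TestMakeRangeDefaults checks the defaulting rules for a plain IPv4 subnet
// with no explicit rangeStart, rangeEnd, or gateway.
func TestMakeRangeDefaults(t *testing.T) {
	r, err := MakeRange(&lbv1beta1.Range{Subnet: "192.168.0.0/24"})
	if err != nil {
		t.Fatal(err)
	}

	if got := r.RangeStart.String(); got != "192.168.0.1" {
		t.Errorf("RangeStart = %s, want 192.168.0.1", got)
	}

	if got := r.RangeEnd.String(); got != "192.168.0.254" {
		t.Errorf("RangeEnd = %s, want 192.168.0.254", got)
	}

	if got := r.Gateway.String(); got != "192.168.0.1" {
		t.Errorf("Gateway = %s, want 192.168.0.1", got)
	}

	// 254 addresses from .1 to .254, minus the gateway, which the allocator skips.
	if got := CountIP(r); got != 253 {
		t.Errorf("CountIP = %d, want 253", got)
	}
}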