Skip to content

Commit

Permalink
refactor: remove log in parameters and rename functions
Browse files Browse the repository at this point in the history
  • Loading branch information
Aldo Fuster Turpin committed Jul 20, 2023
1 parent 38aa4a9 commit 01e7151
Show file tree
Hide file tree
Showing 6 changed files with 595 additions and 310 deletions.
2 changes: 1 addition & 1 deletion pkg/cluster/delete.go
Original file line number Diff line number Diff line change
Expand Up @@ -196,7 +196,7 @@ func (m *manager) deleteResources(ctx context.Context) error {

case "microsoft.network/privatednszones":
m.log.Printf("deleting private DNS nested resources of %s", *resource.ID)
err = utilnet.DeletePrivateDNSVirtualNetworkLinks(ctx, m.virtualNetworkLinks, *resource.ID)
err = utilnet.DeletePrivateDNSVNetLinks(ctx, m.virtualNetworkLinks, *resource.ID)
if err != nil {
return err
}
Expand Down
188 changes: 84 additions & 104 deletions pkg/cluster/removeprivatednszone.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,16 +5,12 @@ package cluster

import (
"context"
"strings"

mgmtprivatedns "github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns"
"github.com/Azure/go-autorest/autorest/azure"
configclient "github.com/openshift/client-go/config/clientset/versioned"
v1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1"
mcv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
"github.com/sirupsen/logrus"
"errors"
"fmt"

v1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"

Check failure on line 11 in pkg/cluster/removeprivatednszone.go

View workflow job for this annotation

GitHub Actions / golangci-lint

import "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" imported as "v1" but must be "mcv1" according to config (importas)
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/retry"

utilnet "github.com/Azure/ARO-RP/pkg/util/net"
"github.com/Azure/ARO-RP/pkg/util/ready"
Expand All @@ -23,7 +19,8 @@ import (
)

func (m *manager) removePrivateDNSZone(ctx context.Context) error {
resourceGroup := stringutils.LastTokenByte(m.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID, '/')
resourceGroupID := m.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID
resourceGroup := stringutils.LastTokenByte(resourceGroupID, '/')

zones, err := m.privateZones.ListByResourceGroup(ctx, resourceGroup, nil)
if err != nil {
Expand All @@ -33,145 +30,128 @@ func (m *manager) removePrivateDNSZone(ctx context.Context) error {

if len(zones) == 0 {
// fix up any clusters that we already upgraded
if err := utilnet.UpdateDNSs(ctx, m.configcli.ConfigV1().DNSes(), m.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID); err != nil {
if err := utilnet.UpdateDNSs(ctx, m.configcli.ConfigV1().DNSes(), resourceGroupID); err != nil {
m.log.Print(err)
}
return nil
}

if !m.clusterHasSameNumberOfNodesAndMachineConfigPools(ctx) {
return nil
if m.mcocli == nil {
return errors.New("manager.mcocli is nil")
}

if !version.ClusterVersionIsGreaterThan4_3(ctx, m.configcli, m.log) {
return nil
if m.kubernetescli == nil {
return errors.New("manager.kubernetescli is nil")
}

if err = utilnet.UpdateDNSs(ctx, m.configcli.ConfigV1().DNSes(), m.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID); err != nil {
mcpLister := m.mcocli.MachineconfigurationV1().MachineConfigPools()
nodeLister := m.kubernetescli.CoreV1().Nodes()
sameNumberOfNodesAndMachines, err := sameNumberOfNodesAndMachines(ctx, mcpLister, nodeLister)
// sameNumberOfNodesAndMachines, err := m.sameNumberOfNodesAndMachines(ctx)
if err != nil {
m.log.Print(err)
return nil
}

utilnet.RemoveZones(ctx, m.log, m.virtualNetworkLinks, m.privateZones, zones, resourceGroup)
return nil
}
if !sameNumberOfNodesAndMachines {
return nil
}

func (m *manager) clusterHasSameNumberOfNodesAndMachineConfigPools(ctx context.Context) bool {
machineConfigPoolList, err := m.mcocli.MachineconfigurationV1().MachineConfigPools().List(ctx, metav1.ListOptions{})
clusterVersionIsLessThan4_4, err := version.ClusterVersionIsLessThan4_4(ctx, m.configcli)
if err != nil {
m.log.Print(err)
return false
return nil
}

nMachineConfigPools, errorOcurred := validateMachineConfigPoolsAndGetCounter(machineConfigPoolList.Items, m.log)
if errorOcurred {
return false
if clusterVersionIsLessThan4_4 {
m.log.Printf("cluster version < 4.4, not removing private DNS zone")
return nil
}

nodes, err := m.kubernetescli.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
if err = utilnet.UpdateDNSs(ctx, m.configcli.ConfigV1().DNSes(), resourceGroupID); err != nil {
m.log.Print(err)
return false
}

nNodes := len(nodes.Items)
if nNodes != nMachineConfigPools {
m.log.Printf("cluster has %d nodes but %d under MCPs, not removing private DNS zone", nNodes, nMachineConfigPools)
return false
return nil
}
return true
}

func validateMachineConfigPoolsAndGetCounter(machineConfigPools []mcv1.MachineConfigPool, logEntry *logrus.Entry) (nMachineConfigPools int, errOccurred bool) {
for _, mcp := range machineConfigPools {
if !utilnet.McpContainsARODNSConfig(mcp) {
logEntry.Printf("ARO DNS config not found in MCP %s", mcp.Name)
return 0, true
}

if !ready.MachineConfigPoolIsReady(&mcp) {
logEntry.Printf("MCP %s not ready", mcp.Name)
return 0, true
}

nMachineConfigPools += int(mcp.Status.MachineCount)
if err := utilnet.RemoveZones(ctx, m.virtualNetworkLinks, m.privateZones, zones, resourceGroup); err != nil {
m.log.Print(err)
return nil
}
return nMachineConfigPools, false
return nil
}

func mcpContainsARODNSConfig(mcp mcv1.MachineConfigPool) bool {
for _, source := range mcp.Status.Configuration.Source {
mcpPrefix := "99-"
mcpSuffix := "-aro-dns"

if source.Name == mcpPrefix+mcp.Name+mcpSuffix {
return true
}
}
return false
// MCPLister lists MachineConfigPools. It is the narrow interface consumed
// by sameNumberOfNodesAndMachines; the machine-config client returned by
// MachineconfigurationV1().MachineConfigPools() satisfies it.
type MCPLister interface {
List(ctx context.Context, opts metav1.ListOptions) (*v1.MachineConfigPoolList, error)
}

func (m *manager) updateDNSs(ctx context.Context) error {
fn := updateClusterDNSFn(ctx, m.configcli.ConfigV1().DNSes(), m.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID)
return retry.RetryOnConflict(retry.DefaultRetry, fn)
// NodeLister lists cluster nodes. It is the narrow interface consumed by
// sameNumberOfNodesAndMachines; the client returned by CoreV1().Nodes()
// satisfies it.
type NodeLister interface {
List(ctx context.Context, opts metav1.ListOptions) (*corev1.NodeList, error)
}

func updateClusterDNSFn(ctx context.Context, dnsInterface v1.DNSInterface, resourceGroupID string) func() error {
return func() error {
dns, err := dnsInterface.Get(ctx, "cluster", metav1.GetOptions{})
if err != nil {
return err
}

if dns.Spec.PrivateZone == nil ||
!strings.HasPrefix(
strings.ToLower(dns.Spec.PrivateZone.ID),
strings.ToLower(resourceGroupID)) {
return nil
}

dns.Spec.PrivateZone = nil

_, err = dnsInterface.Update(ctx, dns, metav1.UpdateOptions{})
return err
// sameNumberOfNodesAndMachines returns true if the cluster has the same number of nodes and total machines,
// and an error if any. Otherwise it returns false and nil.
func sameNumberOfNodesAndMachines(ctx context.Context, mcpLister MCPLister, nodeLister NodeLister) (bool, error) {
machineConfigPoolList, err := mcpLister.List(ctx, metav1.ListOptions{})
if err != nil {
return false, err
}
}

func clusterVersionIsAtLeast4_4(ctx context.Context, configcli configclient.Interface, logEntry *logrus.Entry) (bool, error) {
cv, err := configcli.ConfigV1().ClusterVersions().Get(ctx, "version", metav1.GetOptions{})
totalMachines, err := totalMachinesInTheMCPs(machineConfigPoolList.Items)
if err != nil {
return false, err
}
v, err := version.GetClusterVersion(cv)

nodes, err := nodeLister.List(ctx, metav1.ListOptions{})
if err != nil {
logEntry.Print(err)
return false, nil
return false, err
}

if v.Lt(version.NewVersion(4, 4)) {
// 4.3 uses SRV records for etcd
logEntry.Printf("cluster version < 4.4, not removing private DNS zone")
return false, nil
nNodes := len(nodes.Items)
if nNodes != totalMachines {
return false, fmt.Errorf("cluster has %d nodes but %d under MCPs, not removing private DNS zone", nNodes, totalMachines)
}

return true, nil
}

func (m *manager) removeZones(ctx context.Context, privateZones []mgmtprivatedns.PrivateZone, resourceGroup string) {
for _, privateZone := range privateZones {
if err := utilnet.DeletePrivateDNSVirtualNetworkLinks(ctx, m.virtualNetworkLinks, *privateZone.ID); err != nil {
m.log.Print(err)
return
// func (m *manager) sameNumberOfNodesAndMachines(ctx context.Context) (bool, error) {
// machineConfigPoolList, err := m.mcocli.MachineconfigurationV1().MachineConfigPools().List(ctx, metav1.ListOptions{})
// if err != nil {
// return false, err
// }

// totalMachines, err := totalMachinesInTheMCPs(machineConfigPoolList.Items)
// if err != nil {
// return false, err
// }

// nodes, err := m.kubernetescli.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
// if err != nil {
// return false, err
// }

// nNodes := len(nodes.Items)
// if nNodes != totalMachines {
// return false, fmt.Errorf("cluster has %d nodes but %d under MCPs, not removing private DNS zone", nNodes, totalMachines)
// }

// return true, nil
// }

// totalMachinesInTheMCPs returns the total number of machines in the machineConfigPools
// and an error, if any.
func totalMachinesInTheMCPs(machineConfigPools []v1.MachineConfigPool) (int, error) {
totalMachines := 0
for _, mcp := range machineConfigPools {
if !utilnet.McpContainsARODNSConfig(mcp) {
return 0, fmt.Errorf("ARO DNS config not found in MCP %s", mcp.Name)
}

r, err := azure.ParseResourceID(*privateZone.ID)
if err != nil {
m.log.Print(err)
return
if !ready.MachineConfigPoolIsReady(&mcp) {
return 0, fmt.Errorf("MCP %s not ready", mcp.Name)
}

if err = m.privateZones.DeleteAndWait(ctx, resourceGroup, r.ResourceName, ""); err != nil {
m.log.Print(err)
return
}
totalMachines += int(mcp.Status.MachineCount)
}
return totalMachines, nil
}
Loading

0 comments on commit 01e7151

Please sign in to comment.