[release-4.16] OCPBUGS-50594: fixes overzealous deletion of SNAT in egressIP #2456

Open
wants to merge 1 commit into base: release-4.16

Conversation

Contributor

@jluhrsen commented Feb 11, 2025

Currently, when creating an EgressIP in a dualstack cluster with disable-snat-multiple-gws set, the code removes both the IPv4 and IPv6 SNATs from the database, regardless of whether the EgressIP is IPv4 or IPv6. This means the pod will not be able to communicate with the cluster correctly, because the Gateway Router is missing an SNAT and the traffic will be dropped.

Additionally, the testing looks a little different from the others because we do not correctly set up dualstack clusters when running unit tests.

Signed-off-by: Jacob Tanenbaum [email protected]
(cherry picked from commit e64d754)
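
The gist of the fix is to filter the pod's IPs down to the egress IP's address family before building the SNAT delete operations, rather than deleting the SNATs of both families. Here is a minimal, self-contained sketch of that family matching, assuming util.MatchAllIPNetFamily (used in the fix below) simply keeps the IPNets whose family matches a v6 flag; the helper here is a hypothetical stand-in, not the real implementation:

package main

import (
	"fmt"
	"net"
)

// matchAllIPNetFamily keeps only the IPNets of the requested family,
// mirroring how util.MatchAllIPNetFamily is used in the fix below.
func matchAllIPNetFamily(isIPv6 bool, ipnets []*net.IPNet) []*net.IPNet {
	var out []*net.IPNet
	for _, n := range ipnets {
		if (n.IP.To4() == nil) == isIPv6 {
			out = append(out, n)
		}
	}
	return out
}

func main() {
	_, v4, _ := net.ParseCIDR("10.244.0.5/32")
	_, v6, _ := net.ParseCIDR("fd00:10:244::5/128")
	podIPs := []*net.IPNet{v4, v6}

	// For an IPv4 egress IP only the IPv4 SNAT should be removed,
	// and vice versa; the other family's SNAT must survive.
	fmt.Println(matchAllIPNetFamily(false, podIPs)) // [10.244.0.5/32]
	fmt.Println(matchAllIPNetFamily(true, podIPs))  // [fd00:10:244::5/128]
}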

4.16 is not using Ginkgo v2, so the new tests that came in from this PR using DescribeTable() and Entry() from v2 had to be converted to ginkgotable.DescribeTable() and ginkgotable.Entry().
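
For example, a v2-style table test converts along these lines under the Ginkgo v1 table extension (the suite and test names here are illustrative, and ginkgotable is assumed to alias github.com/onsi/ginkgo/extensions/table):

package ovn

import (
	"testing"

	. "github.com/onsi/ginkgo"
	ginkgotable "github.com/onsi/ginkgo/extensions/table"
	. "github.com/onsi/gomega"
)

func TestEgressIPTables(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "example suite")
}

// With Ginkgo v2 these would be plain DescribeTable()/Entry(),
// imported from github.com/onsi/ginkgo/v2.
var _ = ginkgotable.DescribeTable("egressIP SNAT deletion",
	func(isIPv6 bool) {
		Expect(isIPv6).To(Or(BeTrue(), BeFalse()))
	},
	ginkgotable.Entry("ipv4 egress IP", false),
	ginkgotable.Entry("ipv6 egress IP", true),
)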

Additionally, there were two cherry-pick conflicts, so the bot could not handle this backport and it was created manually. The first conflict, in deleteExternalGWPodSNATOps():

func (e *egressIPZoneController) deleteExternalGWPodSNATOps(ops []ovsdb.Operation, pod *kapi.Pod, podIPs []*net.IPNet, status egressipv1.EgressIPStatusItem, isOVNNetwork bool) ([]ovsdb.Operation, error) {
	if config.Gateway.DisableSNATMultipleGWs && status.Node == pod.Spec.NodeName && isOVNNetwork {
		affectedIPs := util.MatchAllIPNetFamily(utilnet.IsIPv6String(status.EgressIP), podIPs)
		if len(affectedIPs) == 0 {
			return nil, nil // noting to do.
		}
		// remove snats to->nodeIP (from the node where pod exists if that node is also serving
		// as an egress node for this pod) for these podIPs before adding the snat to->egressIP
		extIPs, err := getExternalIPsGR(e.watchFactory, pod.Spec.NodeName)
		if err != nil {
			return nil, err
		}
<<<<<<< HEAD
		ops, err = deletePodSNATOps(e.nbClient, ops, pod.Spec.NodeName, extIPs, podIPs)
||||||| parent of e64d7549b (fixes overzealous deletion of SNAT in egressIP)
		ops, err = deletePodSNATOps(e.nbClient, ops, e.GetNetworkScopedGWRouterName(pod.Spec.NodeName), extIPs, podIPs)
=======
		ops, err = deletePodSNATOps(e.nbClient, ops, e.GetNetworkScopedGWRouterName(pod.Spec.NodeName), extIPs, affectedIPs)
>>>>>>> e64d7549b (fixes overzealous deletion of SNAT in egressIP)
		if err != nil {
			return nil, err
		}
	} else if config.Gateway.DisableSNATMultipleGWs {
		// it means the pod host is different from the egressNode that is managing the pod
		klog.V(5).Infof("Not deleting SNAT on %s since egress node managing %s/%s is %s or Egress IP is not SNAT'd by OVN", pod.Spec.NodeName, pod.Namespace, pod.Name, status.Node)
	}
	return ops, nil
}

which was resolved to keep HEAD's plain node name, since the 4.16 branch does not use network-scoped gateway-router names, while taking the fix's family-filtered affectedIPs:

ops, err = deletePodSNATOps(e.nbClient, ops, pod.Spec.NodeName, extIPs, affectedIPs)

The second conflict, in the unit-test helper functions:
<<<<<<< HEAD
||||||| parent of e64d7549b (fixes overzealous deletion of SNAT in egressIP)

// returns the address set with externalID "k8s.ovn.org/name": "egresssvc-served-pods"
func buildEgressIPServiceAddressSets(ips []string) (*nbdb.AddressSet, *nbdb.AddressSet) {
	dbIDs := egresssvc.GetEgressServiceAddrSetDbIDs(DefaultNetworkControllerName)
	return addressset.GetTestDbAddrSets(dbIDs, ips)
}

// returns the address set with externalID "k8s.ovn.org/name": "egressip-served-pods""
func buildEgressIPServedPodsAddressSets(ips []string) (*nbdb.AddressSet, *nbdb.AddressSet) {
	dbIDs := getEgressIPAddrSetDbIDs(EgressIPServedPodsAddrSetName, DefaultNetworkControllerName)
	return addressset.GetTestDbAddrSets(dbIDs, ips)

}

// returns the address set with externalID "k8s.ovn.org/name": "node-ips"
func buildEgressIPNodeAddressSets(ips []string) (*nbdb.AddressSet, *nbdb.AddressSet) {
	dbIDs := getEgressIPAddrSetDbIDs(NodeIPAddrSetName, DefaultNetworkControllerName)
	return addressset.GetTestDbAddrSets(dbIDs, ips)
}
=======

// returns the address set with externalID "k8s.ovn.org/name": "egresssvc-served-pods"
func buildEgressIPServiceAddressSets(ips []string) (*nbdb.AddressSet, *nbdb.AddressSet) {
	dbIDs := egresssvc.GetEgressServiceAddrSetDbIDs(DefaultNetworkControllerName)
	return addressset.GetTestDbAddrSets(dbIDs, ips)
}

// returns the address set with externalID "k8s.ovn.org/name": "egressip-served-pods""
func buildEgressIPServedPodsAddressSets(ips []string) (*nbdb.AddressSet, *nbdb.AddressSet) {
	dbIDs := getEgressIPAddrSetDbIDs(EgressIPServedPodsAddrSetName, DefaultNetworkControllerName)
	return addressset.GetTestDbAddrSets(dbIDs, ips)

}

// returns the address set with externalID "k8s.ovn.org/name": "node-ips"
func buildEgressIPNodeAddressSets(ips []string) (*nbdb.AddressSet, *nbdb.AddressSet) {
	dbIDs := getEgressIPAddrSetDbIDs(NodeIPAddrSetName, DefaultNetworkControllerName)
	return addressset.GetTestDbAddrSets(dbIDs, ips)
}

// returns the no reroute policies associated with services and directly to pods
// func getDefaultReroutePolicies() []*nbdb.LogicalRouterPolicy {
func buildDefaultReroutePolicies() ([]string, []libovsdbtest.TestData) {
	//	logicalReroutePolicies := []*nbdb.LogicalRouterPolicy{}
	testData := []libovsdbtest.TestData{}
	uuids := []string{}

	v4Subnets, v6Subnets := util.GetClusterSubnets()
	for i, v4Subnet := range v4Subnets {
		testData = append(testData,
			&nbdb.LogicalRouterPolicy{
				Priority: types.DefaultNoRereoutePriority,
				Match:    fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4Subnet.String(), config.Gateway.V4JoinSubnet),
				Action:   nbdb.LogicalRouterPolicyActionAllow,
				UUID:     fmt.Sprintf("no-reroute-v4-service-%d-UUID", i),
			},
			&nbdb.LogicalRouterPolicy{
				Priority: types.DefaultNoRereoutePriority,
				Match:    fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4Subnet.String(), v4Subnet.String()),
				Action:   nbdb.LogicalRouterPolicyActionAllow,
				UUID:     fmt.Sprintf("no-reroute-v4-%d-UUID", i),
			},
		)
		uuids = append(uuids,
			fmt.Sprintf("no-reroute-v4-service-%d-UUID", i),
			fmt.Sprintf("no-reroute-v4-%d-UUID", i),
		)

	}
	for i, v6Subnet := range v6Subnets {
		testData = append(testData,
			&nbdb.LogicalRouterPolicy{
				Priority: types.DefaultNoRereoutePriority,
				Match:    fmt.Sprintf("ip6.src == %s && ip6.dst == %s", v6Subnet.String(), config.Gateway.V6JoinSubnet),
				Action:   nbdb.LogicalRouterPolicyActionAllow,
				UUID:     fmt.Sprintf("no-reroute-v6-service-%d-UUID", i),
			},
			&nbdb.LogicalRouterPolicy{
				Priority: types.DefaultNoRereoutePriority,
				Match:    fmt.Sprintf("ip6.src == %s && ip6.dst == %s", v6Subnet.String(), v6Subnet.String()),
				Action:   nbdb.LogicalRouterPolicyActionAllow,
				UUID:     fmt.Sprintf("no-reroute-v6-%d-UUID", i),
			},
		)
		uuids = append(uuids, fmt.Sprintf("no-reroute-v6-service-%d-UUID", i), fmt.Sprintf("no-reroute-v6-%d-UUID", i))
	}

	return uuids, testData

}

// makes the egressipDefaultNoReroutePolicies for the node and the corresponding address sets, additionally returns a slice of UUIDS used to append to the router policy
func buildDefaultNoRerouteNodePolicies(podAddresses, nodeAddresses, serviceAddresses []string) ([]string, []libovsdbtest.TestData) {
	testData := []libovsdbtest.TestData{}
	uuids := []string{}

	egressIPServedPodsASv4, egressIPServedPodsASv6 := buildEgressIPServedPodsAddressSets(podAddresses)
	egressNodeIPsASv4, egressNodeIPsASv6 := buildEgressIPNodeAddressSets(nodeAddresses)
	egressSVCServedPodsASv4, egressSVCServedPodsASv6 := buildEgressIPServiceAddressSets(serviceAddresses)

	if config.IPv4Mode {
		testData = append(testData, egressIPServedPodsASv4, egressNodeIPsASv4, egressSVCServedPodsASv4,
			&nbdb.LogicalRouterPolicy{
				Priority: types.DefaultNoRereoutePriority,
				Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s",
					egressIPServedPodsASv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name),
				Action:  nbdb.LogicalRouterPolicyActionAllow,
				UUID:    "default-no-reroute-node-v4-UUID",
				Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark},
			},
		)
		uuids = append(uuids, "default-no-reroute-node-v4-UUID")
	}
	if config.IPv6Mode {
		testData = append(testData, egressIPServedPodsASv6, egressNodeIPsASv6, egressSVCServedPodsASv6,
			&nbdb.LogicalRouterPolicy{
				Priority: types.DefaultNoRereoutePriority,
				Match: fmt.Sprintf("(ip6.src == $%s || ip6.src == $%s) && ip6.dst == $%s",
					egressIPServedPodsASv6.Name, egressSVCServedPodsASv6.Name, egressNodeIPsASv6.Name),
				Action:  nbdb.LogicalRouterPolicyActionAllow,
				UUID:    "default-no-reroute-node-v6-UUID",
				Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark},
			},
		)
		uuids = append(uuids, "default-no-reroute-node-v6-UUID")
	}

	return uuids, testData

}
>>>>>>> e64d7549b (fixes overzealous deletion of SNAT in egressIP)

which came down to the cherry-pick not being able to de-duplicate three identical functions, buildEgressIPServiceAddressSets(), buildEgressIPServedPodsAddressSets(), and buildEgressIPNodeAddressSets(), which already exist elsewhere in the 4.16 test file (the HEAD side of the conflict is empty); the resolution keeps the existing copies and adds only the new buildDefaultReroutePolicies() and buildDefaultNoRerouteNodePolicies() helpers.
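
Note that buildDefaultNoRerouteNodePolicies() branches on config.IPv4Mode and config.IPv6Mode, which is what lets the unit tests approximate a dualstack cluster without one being set up for real. A sketch of how a test might drive it; the addresses and the handling of the returned values are illustrative, not taken from the actual tests:

// Force dualstack mode for the unit test, since the suite does not
// set up a real dualstack cluster (see the PR description above).
config.IPv4Mode = true
config.IPv6Mode = true

uuids, expectedData := buildDefaultNoRerouteNodePolicies(
	[]string{"10.128.0.5", "fd00:10:128::5"}, // pod addresses (illustrative)
	[]string{"172.16.0.10", "fd00:172::10"},  // node addresses (illustrative)
	nil,                                      // no egress-service-served pods
)
// uuids would be appended to the cluster router's Policies, and
// expectedData handed to the libovsdb test harness as expected NB data.
_ = uuids
_ = expectedData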

@openshift-ci-robot added the jira/severity-important (referenced Jira bug's severity is important for the branch this PR is targeting), jira/valid-reference (indicates that this PR references a valid Jira ticket of any type), and jira/invalid-bug (indicates that a referenced Jira bug is invalid for the branch this PR is targeting) labels Feb 11, 2025
@openshift-ci-robot
Contributor

@jluhrsen: This pull request references Jira Issue OCPBUGS-50594, which is invalid:

  • expected the bug to target either version "4.16." or "openshift-4.16.", but it targets "4.17.0" instead
  • expected dependent Jira Issue OCPBUGS-48828 to be in one of the following states: VERIFIED, RELEASE PENDING, CLOSED (ERRATA), CLOSED (CURRENT RELEASE), CLOSED (DONE), CLOSED (DONE-ERRATA), but it is MODIFIED instead

Comment /jira refresh to re-evaluate validity if changes to the Jira bug are made, or edit the title of this pull request to link to a different bug.

The bug has been updated to refer to the pull request using the external bug tracker.

In response to this:

Currently, when creating an EgressIP in a dualstack cluster with disable-snat-multiple-gws set, the code removes both the IPv4 and IPv6 SNATs from the database, regardless of whether the EgressIP is IPv4 or IPv6. This means the pod will not be able to communicate with the cluster correctly, because the Gateway Router is missing an SNAT and the traffic will be dropped.

Additionally, the testing looks a little different from the others because we do not correctly set up dualstack clusters when running unit tests.

Signed-off-by: Jacob Tanenbaum [email protected]
(cherry picked from commit e64d754)

📑 Description

Fixes #

Additional Information for reviewers

✅ Checks

  • My code requires changes to the documentation
  • if so, I have updated the documentation as required
  • My code requires tests
  • if so, I have added and/or updated the tests as required
  • All the tests have passed in the CI

How to verify it

Instructions for interacting with me using PR comments are available here. If you have questions or suggestions related to my behavior, please file an issue against the openshift-eng/jira-lifecycle-plugin repository.

@jluhrsen
Contributor Author

/jira refresh

@openshift-ci-robot
Contributor

@jluhrsen: This pull request references Jira Issue OCPBUGS-50594, which is invalid:

  • expected dependent Jira Issue OCPBUGS-48828 to be in one of the following states: VERIFIED, RELEASE PENDING, CLOSED (ERRATA), CLOSED (CURRENT RELEASE), CLOSED (DONE), CLOSED (DONE-ERRATA), but it is MODIFIED instead

Comment /jira refresh to re-evaluate validity if changes to the Jira bug are made, or edit the title of this pull request to link to a different bug.

In response to this:

/jira refresh


@trozet
Contributor

trozet commented Feb 11, 2025

/label backport-risk-assessed
/lgtm

@openshift-ci bot added the backport-risk-assessed label (indicates a PR to a release branch has been evaluated and considered safe to accept) Feb 11, 2025
@openshift-ci bot added the lgtm label (indicates that a PR is ready to be merged) Feb 11, 2025
Contributor

openshift-ci bot commented Feb 11, 2025

[APPROVALNOTIFIER] This PR is APPROVED

This pull-request has been approved by: jluhrsen, trozet

The full list of commands accepted by this bot can be found here.

The pull request process is described here

Needs approval from an approver in each of these files:

Approvers can indicate their approval by writing /approve in a comment
Approvers can cancel approval by writing /approve cancel in a comment

@openshift-ci bot added the approved label (indicates a PR has been approved by an approver from all required OWNERS files) Feb 11, 2025
Contributor

openshift-ci bot commented Feb 11, 2025

@asood-rh: The label(s) /label cherry-pick-pproved cannot be applied. These labels are supported: acknowledge-critical-fixes-only, platform/aws, platform/azure, platform/baremetal, platform/google, platform/libvirt, platform/openstack, ga, tide/merge-method-merge, tide/merge-method-rebase, tide/merge-method-squash, px-approved, docs-approved, qe-approved, no-qe, downstream-change-needed, rebase/manual, cluster-config-api-changed, approved, backport-risk-assessed, bugzilla/valid-bug, cherry-pick-approved, jira/valid-bug, staff-eng-approved. Is this label configured under labels -> additional_labels or labels -> restricted_labels in plugin.yaml?

In response to this:

/label cherry-pick-pproved

Instructions for interacting with me using PR comments are available here. If you have questions or suggestions related to my behavior, please file an issue against the kubernetes-sigs/prow repository.

@asood-rh

/label cherry-pick-approved

@openshift-ci bot added the cherry-pick-approved label (indicates a cherry-pick PR into a release branch has been approved by the release branch manager) Feb 11, 2025
@jechen0648

/ocpbugs cc-qa

@jechen0648

jechen0648 commented Feb 11, 2025

@asood-rh FYI I saw this PR, I was going to do pre-merge testing before labeling it

@asood-rh

@asood-rh FYI I saw this PR, I was going to do pre-merge testing before labeling it

@jechen0648 I did not see the /ocpbugs cc-qa label added to the PR when I added cherry-pick-approved, which would have indicated that it was going to be pre-merge tested.

@jechen0648

/label qe-approved

@openshift-ci bot added the qe-approved label (signifies that QE has signed off on this PR) Feb 11, 2025
@jluhrsen
Contributor Author

/retest

@jluhrsen
Contributor Author

/hold
need to fix the unit tests

@openshift-ci bot added the do-not-merge/hold label (indicates that a PR should not merge because someone has issued a /hold command) Feb 12, 2025
currently when creating egressIP in a dualstack cluster regardless of if
the egressip is ipv4 or ipv6 when disable-snat-multiple-gws is set the
code removes both ipv4 and ipv6 snats from the database. This means that
the pod will not be able to communicate with the cluster correctly
because the Gateway Router is missing an SNAT and the traffic will be
dropped.

Additionally the testing looks a little different from the others
because we do not correctly set up dualstack clusters when running unit
tests

Signed-off-by: Jacob Tanenbaum <[email protected]>
(cherry picked from commit e64d754)
@openshift-ci bot removed the lgtm label (indicates that a PR is ready to be merged) Feb 21, 2025
Contributor

openshift-ci bot commented Feb 21, 2025

New changes are detected. LGTM label has been removed.

@openshift-ci-robot added the jira/valid-bug label (indicates that a referenced Jira bug is valid for the branch this PR is targeting) Feb 21, 2025
@openshift-ci-robot
Contributor

@jluhrsen: This pull request references Jira Issue OCPBUGS-50594, which is valid. The bug has been moved to the POST state.

7 validation(s) were run on this bug
  • bug is open, matching expected state (open)
  • bug target version (4.16.z) matches configured target version for branch (4.16.z)
  • bug is in the state New, which is one of the valid states (NEW, ASSIGNED, POST)
  • release note text is set and does not match the template
  • dependent bug Jira Issue OCPBUGS-48828 is in the state Closed (Done-Errata), which is one of the valid states (VERIFIED, RELEASE PENDING, CLOSED (ERRATA), CLOSED (CURRENT RELEASE), CLOSED (DONE), CLOSED (DONE-ERRATA))
  • dependent Jira Issue OCPBUGS-48828 targets the "4.17.0" version, which is one of the valid target versions: 4.17.0, 4.17.z
  • bug has dependents

No GitHub users were found matching the public email listed for the QA contact in Jira ([email protected]), skipping review request.

In response to this:

(the full PR description, quoted above)

@openshift-ci-robot removed the jira/invalid-bug label (indicates that a referenced Jira bug is invalid for the branch this PR is targeting) Feb 21, 2025
@jluhrsen
Contributor Author

/jira refresh

@openshift-ci-robot
Contributor

@jluhrsen: This pull request references Jira Issue OCPBUGS-50594, which is valid.

7 validation(s) were run on this bug
  • bug is open, matching expected state (open)
  • bug target version (4.16.z) matches configured target version for branch (4.16.z)
  • bug is in the state POST, which is one of the valid states (NEW, ASSIGNED, POST)
  • release note text is set and does not match the template
  • dependent bug Jira Issue OCPBUGS-48828 is in the state Closed (Done-Errata), which is one of the valid states (VERIFIED, RELEASE PENDING, CLOSED (ERRATA), CLOSED (CURRENT RELEASE), CLOSED (DONE), CLOSED (DONE-ERRATA))
  • dependent Jira Issue OCPBUGS-48828 targets the "4.17.0" version, which is one of the valid target versions: 4.17.0, 4.17.z
  • bug has dependents

No GitHub users were found matching the public email listed for the QA contact in Jira ([email protected]), skipping review request.

In response to this:

/jira refresh


Contributor

openshift-ci bot commented Feb 21, 2025

@jluhrsen: The following tests failed, say /retest to rerun all failed tests or /retest-required to rerun all mandatory failed tests:

Test name Commit Details Required Rerun command
ci/prow/e2e-aws-ovn-windows ff27c7a link true /test e2e-aws-ovn-windows
ci/prow/e2e-azure-ovn ff27c7a link false /test e2e-azure-ovn
ci/prow/ovncore-perfscale-aws-ovn-large-cluster-density-v2 ff27c7a link false /test ovncore-perfscale-aws-ovn-large-cluster-density-v2
ci/prow/e2e-metal-ipi-ovn-dualstack-local-gateway ff27c7a link false /test e2e-metal-ipi-ovn-dualstack-local-gateway
ci/prow/unit ff27c7a link true /test unit
ci/prow/ovncore-perfscale-aws-ovn-large-node-density-cni ff27c7a link false /test ovncore-perfscale-aws-ovn-large-node-density-cni
ci/prow/ovncore-perfscale-aws-ovn-xlarge-cluster-density-v2 ff27c7a link false /test ovncore-perfscale-aws-ovn-xlarge-cluster-density-v2
ci/prow/ovncore-perfscale-aws-ovn-xlarge-node-density-cni ff27c7a link false /test ovncore-perfscale-aws-ovn-xlarge-node-density-cni
ci/prow/security ff27c7a link false /test security

Full PR test history. Your PR dashboard.

