diff --git a/tests/e2e/ciliumnokp/Vagrantfile b/tests/e2e/ciliumnokp/Vagrantfile new file mode 100644 index 0000000000..0651f845a4 --- /dev/null +++ b/tests/e2e/ciliumnokp/Vagrantfile @@ -0,0 +1,94 @@ +ENV['VAGRANT_NO_PARALLEL'] = 'no' +NODE_ROLES = (ENV['E2E_NODE_ROLES'] || ["server-0", "agent-0" ]) +NODE_BOXES = (ENV['E2E_NODE_BOXES'] || ['generic/ubuntu2310', 'generic/ubuntu2310']) +GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master") +RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "") +NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i +NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 3072).to_i +NETWORK4_PREFIX = "10.10.10" +NETWORK6_PREFIX = "fd11:decf:c0ff:ee" +install_type = "" + +def provision(vm, roles, role_num, node_num) + vm.box = NODE_BOXES[node_num] + vm.hostname = "#{roles[0]}-#{role_num}" + node_ip4 = "#{NETWORK4_PREFIX}.#{100+node_num}" + node_ip6 = "#{NETWORK6_PREFIX}::#{10+node_num}" + node_ip6_gw = "#{NETWORK6_PREFIX}::1" + # Only works with libvirt, which allows IPv4 + IPv6 on a single network/interface + vm.network "private_network", + :ip => node_ip4, + :netmask => "255.255.255.0", + :libvirt__dhcp_enabled => false, + :libvirt__forward_mode => "none", + :libvirt__guest_ipv6 => "yes", + :libvirt__ipv6_address => "#{NETWORK6_PREFIX}::1", + :libvirt__ipv6_prefix => "64" + + vagrant_defaults = File.exist?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb" + load vagrant_defaults + + defaultOSConfigure(vm) + + scripts_location = Dir.exist?("./scripts") ? "./scripts" : "../scripts" + vm.provision "IPv6 Setup", type: "shell", path: scripts_location + "/ipv6.sh", args: [node_ip4, node_ip6, node_ip6_gw, "cilium", vm.box] + + install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH) + vm.provision "Ping Check", type: "shell", inline: "ping -4 -c 2 rke2.io" + + if roles.include?("server") && role_num == 0 + vm.provision "Create Cilium Manifest", type: "shell", path: "../scripts/cilium_nokubeproxy.sh", args: [ "#{NETWORK4_PREFIX}.100" ] + vm.provision :rke2, run: 'once' do |rke2| + rke2.env = %W[INSTALL_RKE2_TYPE=server #{install_type}] + rke2.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321 + rke2.config = <<~YAML + write-kubeconfig-mode: '0644' + node-external-ip: #{node_ip4},#{node_ip6} + node-ip: #{node_ip4},#{node_ip6} + token: vagrant-rke2 + cluster-cidr: 10.42.0.0/16,2001:cafe:42:0::/56 + service-cidr: 10.43.0.0/16,2001:cafe:43:0::/112 + bind-address: #{NETWORK4_PREFIX}.100 + cni: cilium + disable-kube-proxy: true + YAML + end + end + if roles.include?("agent") + vm.provision :rke2, run: 'once' do |rke2| + rke2.env = %W[INSTALL_RKE2_TYPE=agent #{install_type}] + rke2.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321 + rke2.install_path = false + rke2.config = <<~YAML + write-kubeconfig-mode: '0644' + server: https://#{NETWORK4_PREFIX}.100:9345 + node-ip: #{node_ip4},#{node_ip6} + node-external-ip: #{node_ip4},#{node_ip6} + token: vagrant-rke2 + YAML + end + end +end + +Vagrant.configure("2") do |config| + config.vagrant.plugins = ["vagrant-rke2", "vagrant-reload", "vagrant-libvirt"] + config.vm.provider "libvirt" do |v| + v.cpus = NODE_CPUS + v.memory = NODE_MEMORY + end + + if NODE_ROLES.kind_of?(String) + NODE_ROLES = NODE_ROLES.split(" ", -1) + end + if NODE_BOXES.kind_of?(String) + NODE_BOXES = NODE_BOXES.split(" ", -1) + end + + NODE_ROLES.each_with_index do |name, i| + config.vm.define name do |node| + roles = name.split("-", -1) + role_num = roles.pop.to_i + provision(node.vm, roles, 
role_num, i) + end + end +end diff --git a/tests/e2e/ciliumnokp/ciliumnokp_test.go b/tests/e2e/ciliumnokp/ciliumnokp_test.go new file mode 100644 index 0000000000..b9a9c2a76b --- /dev/null +++ b/tests/e2e/ciliumnokp/ciliumnokp_test.go @@ -0,0 +1,225 @@ +package ciliumnokp + +import ( + "flag" + "fmt" + + "os" + "strings" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/rancher/rke2/tests/e2e" +) + +var nodeOS = flag.String("nodeOS", "generic/ubuntu2310", "VM operating system") +var serverCount = flag.Int("serverCount", 1, "number of server nodes") +var agentCount = flag.Int("agentCount", 1, "number of agent nodes") +var ci = flag.Bool("ci", false, "running on CI") + +func Test_E2ECiliumNoKP(t *testing.T) { + flag.Parse() + RegisterFailHandler(Fail) + suiteConfig, reporterConfig := GinkgoConfiguration() + RunSpecs(t, "Validate dualstack in Cilium without kube-proxy Test Suite", suiteConfig, reporterConfig) +} + +var ( + kubeConfigFile string + serverNodeNames []string + agentNodeNames []string +) +var _ = ReportAfterEach(e2e.GenReport) + +var _ = Describe("Verify DualStack in Cilium without kube-proxy configuration", Ordered, func() { + + It("Starts up with no issues", func() { + var err error + serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount) + Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) + fmt.Println("CLUSTER CONFIG") + fmt.Println("OS:", *nodeOS) + fmt.Println("Server Nodes:", serverNodeNames) + fmt.Println("Agent Nodes:", agentNodeNames) + kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0]) + Expect(err).NotTo(HaveOccurred()) + }) + + It("Checks Node Status", func() { + Eventually(func(g Gomega) { + nodes, err := e2e.ParseNodes(kubeConfigFile, false) + g.Expect(err).NotTo(HaveOccurred()) + for _, node := range nodes { + g.Expect(node.Status).Should(Equal("Ready")) + } + }, "620s", "5s").Should(Succeed()) + _, err := e2e.ParseNodes(kubeConfigFile, true) + Expect(err).NotTo(HaveOccurred()) + }) + + It("Checks Pod Status", func() { + Eventually(func(g Gomega) { + pods, err := e2e.ParsePods(kubeConfigFile, false) + g.Expect(err).NotTo(HaveOccurred()) + for _, pod := range pods { + if strings.Contains(pod.Name, "helm-install") { + g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) + } else { + g.Expect(pod.Status).Should(Equal("Running"), pod.Name) + } + } + }, "420s", "5s").Should(Succeed()) + _, err := e2e.ParsePods(kubeConfigFile, true) + Expect(err).NotTo(HaveOccurred()) + }) + + It("Verifies that each node has IPv4 and IPv6", func() { + for _, node := range serverNodeNames { + cmd := fmt.Sprintf("kubectl get node %s -o jsonpath='{.status.addresses}' --kubeconfig=%s | jq '.[] | select(.type == \"ExternalIP\") | .address'", + node, kubeConfigFile) + res, err := e2e.RunCommand(cmd) + Expect(err).NotTo(HaveOccurred(), res) + Expect(res).Should(ContainSubstring("10.10.10")) + Expect(res).Should(ContainSubstring("fd11:decf:c0ff")) + } + }) + + It("Verifies that cilium config is correct", func() { + cmdCiliumAgents := "kubectl get pods -l app.kubernetes.io/name=cilium-agent -n kube-system -o=name --kubeconfig=" + kubeConfigFile + res, err := e2e.RunCommand(cmdCiliumAgents) + Expect(err).NotTo(HaveOccurred(), res) + ciliumAgents := strings.Split(strings.TrimSpace(res), "\n") + Expect(len(ciliumAgents)).Should(Equal(len(serverNodeNames) + len(agentNodeNames))) + for _, ciliumAgent := range ciliumAgents { + cmd := "kubectl exec " + ciliumAgent + " -n kube-system -c cilium-agent 
--kubeconfig=" + kubeConfigFile + " -- cilium-dbg status --verbose | grep -e 'BPF' -e 'HostPort' -e 'LoadBalancer'" + res, err := e2e.RunCommand(cmd) + Expect(err).NotTo(HaveOccurred(), res) + // We expect the following output and the important parts are HostPort, LoadBalancer, Host Routing and Masquerading + // Host Routing: BPF + // Masquerading: BPF + // Clock Source for BPF: ktime + // - LoadBalancer: Enabled + // - HostPort: Enabled + // BPF Maps: dynamic sizing: on (ratio: 0.002500) + Expect(res).Should(ContainSubstring("Host Routing")) + Expect(res).Should(ContainSubstring("Masquerading")) + Expect(res).Should(ContainSubstring("LoadBalancer: Enabled")) + Expect(res).Should(ContainSubstring("HostPort: Enabled")) + } + }) + + It("Verifies ClusterIP Service", func() { + _, err := e2e.DeployWorkload("dualstack_clusterip.yaml", kubeConfigFile) + Expect(err).NotTo(HaveOccurred()) + Eventually(func() (string, error) { + cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile + return e2e.RunCommand(cmd) + }, "120s", "5s").Should(ContainSubstring("ds-clusterip-pod")) + + // Checks both IPv4 and IPv6 + clusterips, err := e2e.FetchClusterIP(kubeConfigFile, "ds-clusterip-svc", true) + Expect(err).NotTo(HaveOccurred()) + for _, ip := range strings.Split(clusterips, ",") { + if strings.Contains(ip, "::") { + ip = "[" + ip + "]" + } + pods, err := e2e.ParsePods(kubeConfigFile, false) + Expect(err).NotTo(HaveOccurred()) + for _, pod := range pods { + if !strings.HasPrefix(pod.Name, "ds-clusterip-pod") { + continue + } + cmd := fmt.Sprintf("curl -L --insecure http://%s", ip) + Eventually(func() (string, error) { + return e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + }, "60s", "5s").Should(ContainSubstring("Welcome to nginx!"), "failed cmd: "+cmd) + } + } + }) + + It("Verifies internode connectivity", func() { + _, err := e2e.DeployWorkload("pod_client.yaml", kubeConfigFile) + Expect(err).NotTo(HaveOccurred()) + + // Wait for the pod_client to have an IP + Eventually(func() string { + ips, _ := e2e.PodIPsUsingLabel(kubeConfigFile, "app=client") + return ips[0].Ipv4 + }, "40s", "5s").Should(ContainSubstring("10.42"), "failed getClientIPs") + + clientIPs, err := e2e.PodIPsUsingLabel(kubeConfigFile, "app=client") + Expect(err).NotTo(HaveOccurred()) + for _, ip := range clientIPs { + cmd := "kubectl exec svc/client-curl --kubeconfig=" + kubeConfigFile + " -- curl -m7 " + ip.Ipv4 + "/name.html" + Eventually(func() (string, error) { + return e2e.RunCommand(cmd) + }, "20s", "3s").Should(ContainSubstring("client-deployment"), "failed cmd: "+cmd) + } + }) + + It("Verifies Ingress", func() { + _, err := e2e.DeployWorkload("dualstack_ingress.yaml", kubeConfigFile) + Expect(err).NotTo(HaveOccurred(), "Ingress manifest not deployed") + cmd := "kubectl get ingress ds-ingress --kubeconfig=" + kubeConfigFile + " -o jsonpath=\"{.spec.rules[*].host}\"" + hostName, err := e2e.RunCommand(cmd) + Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd) + nodeIPs, err := e2e.GetNodeIPs(kubeConfigFile) + Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd) + for _, node := range nodeIPs { + cmd := fmt.Sprintf("curl --header host:%s http://%s/name.html", hostName, node.Ipv4) + Eventually(func() (string, error) { + return e2e.RunCommand(cmd) + }, "30s", "2s").Should(ContainSubstring("ds-clusterip-pod"), "failed cmd: "+cmd) + cmd = fmt.Sprintf("curl --header host:%s http://[%s]/name.html", hostName, node.Ipv6) + Eventually(func() (string, error) { + 
return e2e.RunCommand(cmd) + }, "10s", "1s").Should(ContainSubstring("ds-clusterip-pod"), "failed cmd: "+cmd) + } + }) + + It("Verifies NodePort Service", func() { + _, err := e2e.DeployWorkload("dualstack_nodeport.yaml", kubeConfigFile) + Expect(err).NotTo(HaveOccurred()) + cmd := "kubectl get service ds-nodeport-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\"" + nodeport, err := e2e.RunCommand(cmd) + Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd) + nodeIPs, err := e2e.GetNodeIPs(kubeConfigFile) + Expect(err).NotTo(HaveOccurred()) + for _, node := range nodeIPs { + cmd = "curl -L --insecure http://" + node.Ipv4 + ":" + nodeport + "/name.html" + Eventually(func() (string, error) { + return e2e.RunCommand(cmd) + }, "30s", "1s").Should(ContainSubstring("ds-nodeport-pod"), "failed cmd: "+cmd) + cmd = "curl -L --insecure http://[" + node.Ipv6 + "]:" + nodeport + "/name.html" + Eventually(func() (string, error) { + return e2e.RunCommand(cmd) + }, "10s", "1s").Should(ContainSubstring("ds-nodeport-pod"), "failed cmd: "+cmd) + } + }) + + It("Verifies there are no required iptables", func() { + // Check that there are no iptables rules with KUBE-SVC and HOSTPORT + cmdiptables := "sudo iptables-save | grep -e 'KUBE-SVC' -e 'HOSTPORT' | wc -l" + for i := range serverNodeNames { + res, err := e2e.RunCmdOnNode(cmdiptables, serverNodeNames[i]) + Expect(err).NotTo(HaveOccurred(), res) + Expect(res).Should(ContainSubstring("0")) + } + }) + +}) + +var failed bool +var _ = AfterEach(func() { + failed = failed || CurrentSpecReport().Failed() +}) + +var _ = AfterSuite(func() { + if failed && !*ci { + fmt.Println("FAILED!") + } else { + Expect(e2e.DestroyCluster()).To(Succeed()) + Expect(os.Remove(kubeConfigFile)).To(Succeed()) + } +}) diff --git a/tests/e2e/dnscache/Vagrantfile b/tests/e2e/dnscache/Vagrantfile index 192cc6c46b..6b54803d3b 100644 --- a/tests/e2e/dnscache/Vagrantfile +++ b/tests/e2e/dnscache/Vagrantfile @@ -26,12 +26,12 @@ def provision(vm, roles, role_num, node_num) :libvirt__ipv6_address => "#{NETWORK6_PREFIX}::1", :libvirt__ipv6_prefix => "64" - vagrant_defaults = File.exists?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb" + vagrant_defaults = File.exist?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb" load vagrant_defaults defaultOSConfigure(vm) - scripts_location = Dir.exists?("./scripts") ? "./scripts" : "../scripts" + scripts_location = Dir.exist?("./scripts") ? "./scripts" : "../scripts" vm.provision "IPv6 Setup", type: "shell", path: scripts_location + "/ipv6.sh", args: [node_ip4, node_ip6, node_ip6_gw, CNI, vm.box] install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH) @@ -84,10 +84,7 @@ Vagrant.configure("2") do |config| NODE_BOXES = NODE_BOXES.split(" ", -1) end - # Must iterate on the index, vagrant does not understand iterating - # over the node roles themselves - NODE_ROLES.length.times do |i| - name = NODE_ROLES[i] + NODE_ROLES.each_with_index do |name, i| config.vm.define name do |node| roles = name.split("-", -1) role_num = roles.pop.to_i diff --git a/tests/e2e/dualstack/Vagrantfile b/tests/e2e/dualstack/Vagrantfile index b4f185fb85..b8c1478f95 100644 --- a/tests/e2e/dualstack/Vagrantfile +++ b/tests/e2e/dualstack/Vagrantfile @@ -28,12 +28,12 @@ def provision(vm, roles, role_num, node_num) :libvirt__ipv6_address => "#{NETWORK6_PREFIX}::1", :libvirt__ipv6_prefix => "64" - vagrant_defaults = File.exists?("./vagrantdefaults.rb") ? 
"./vagrantdefaults.rb" : "../vagrantdefaults.rb" + vagrant_defaults = File.exist?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb" load vagrant_defaults defaultOSConfigure(vm) - scripts_location = Dir.exists?("./scripts") ? "./scripts" : "../scripts" + scripts_location = Dir.exist?("./scripts") ? "./scripts" : "../scripts" vm.provision "IPv6 Setup", type: "shell", path: scripts_location + "/ipv6.sh", args: [node_ip4, node_ip6, node_ip6_gw, CNI, vm.box] install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH) @@ -68,7 +68,6 @@ def provision(vm, roles, role_num, node_num) cluster-cidr: 10.42.0.0/16,2001:cafe:42:0::/56 service-cidr: 10.43.0.0/16,2001:cafe:43:0::/112 cni: #{CNI} - kubelet-arg: "--node-ip=0.0.0.0" # Workaround for https://github.com/kubernetes/kubernetes/issues/111695 YAML end end @@ -83,7 +82,6 @@ def provision(vm, roles, role_num, node_num) node-ip: #{node_ip4},#{node_ip6} server: https://#{NETWORK4_PREFIX}.100:9345 token: vagrant-rke2 - kubelet-arg: "--node-ip=0.0.0.0" # Workaround for https://github.com/kubernetes/kubernetes/issues/111695 YAML end end diff --git a/tests/e2e/dualstack/dualstack_test.go b/tests/e2e/dualstack/dualstack_test.go index 0da481c163..e7c2c30962 100644 --- a/tests/e2e/dualstack/dualstack_test.go +++ b/tests/e2e/dualstack/dualstack_test.go @@ -16,49 +16,12 @@ import ( // Valid nodeOS: generic/ubuntu2004, opensuse/Leap-15.3.x86_64 var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system") var serverCount = flag.Int("serverCount", 3, "number of server nodes") -var agentCount = flag.Int("agentCount", 0, "number of agent nodes") +var agentCount = flag.Int("agentCount", 1, "number of agent nodes") var ci = flag.Bool("ci", false, "running on CI") // Environment Variables Info: // E2E_RELEASE_VERSION=v1.23.1+rke2r1 or nil for latest commit from master -type objIP struct { - name string - ipv4 string - ipv6 string -} - -func getPodIPs(kubeConfigFile string) ([]objIP, error) { - cmd := `kubectl get pods -A -o=jsonpath='{range .items[*]}{.metadata.name}{" "}{.status.podIPs[*].ip}{"\n"}{end}' --kubeconfig=` + kubeConfigFile - return getObjIPs(cmd) -} -func getNodeIPs(kubeConfigFile string) ([]objIP, error) { - cmd := `kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name}{" "}{.status.addresses[?(@.type == "ExternalIP")].address}{"\n"}{end}' --kubeconfig=` + kubeConfigFile - return getObjIPs(cmd) -} - -func getObjIPs(cmd string) ([]objIP, error) { - var objIPs []objIP - res, err := e2e.RunCommand(cmd) - if err != nil { - return nil, err - } - objs := strings.Split(res, "\n") - objs = objs[:len(objs)-1] - - for _, obj := range objs { - fields := strings.Fields(obj) - if len(fields) > 2 { - objIPs = append(objIPs, objIP{name: fields[0], ipv4: fields[1], ipv6: fields[2]}) - } else if len(fields) > 1 { - objIPs = append(objIPs, objIP{name: fields[0], ipv4: fields[1]}) - } else { - objIPs = append(objIPs, objIP{name: fields[0]}) - } - } - return objIPs, nil -} - func Test_E2EDualStack(t *testing.T) { flag.Parse() RegisterFailHandler(Fail) @@ -126,11 +89,11 @@ var _ = Describe("Verify DualStack Configuration", Ordered, func() { } }) It("Verifies that each pod has IPv4 and IPv6", func() { - podIPs, err := getPodIPs(kubeConfigFile) + podIPs, err := e2e.GetPodIPs(kubeConfigFile) Expect(err).NotTo(HaveOccurred()) for _, pod := range podIPs { - Expect(pod.ipv4).Should(Or(ContainSubstring("10.10.10"), ContainSubstring("10.42."), ContainSubstring("192.168.")), pod.name) - 
Expect(pod.ipv6).Should(Or(ContainSubstring("fd11:decf:c0ff"), ContainSubstring("2001:cafe:42")), pod.name) + Expect(pod.Ipv4).Should(Or(ContainSubstring("10.10.10"), ContainSubstring("10.42."), ContainSubstring("192.168.")), pod.Name) + Expect(pod.Ipv6).Should(Or(ContainSubstring("fd11:decf:c0ff"), ContainSubstring("2001:cafe:42")), pod.Name) } }) @@ -168,14 +131,14 @@ var _ = Describe("Verify DualStack Configuration", Ordered, func() { cmd := "kubectl get ingress ds-ingress --kubeconfig=" + kubeConfigFile + " -o jsonpath=\"{.spec.rules[*].host}\"" hostName, err := e2e.RunCommand(cmd) Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd) - nodeIPs, err := getNodeIPs(kubeConfigFile) + nodeIPs, err := e2e.GetNodeIPs(kubeConfigFile) Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd) for _, node := range nodeIPs { - cmd := fmt.Sprintf("curl --header host:%s http://%s/name.html", hostName, node.ipv4) + cmd := fmt.Sprintf("curl --header host:%s http://%s/name.html", hostName, node.Ipv4) Eventually(func() (string, error) { return e2e.RunCommand(cmd) }, "10s", "2s").Should(ContainSubstring("ds-clusterip-pod"), "failed cmd: "+cmd) - cmd = fmt.Sprintf("curl --header host:%s http://[%s]/name.html", hostName, node.ipv6) + cmd = fmt.Sprintf("curl --header host:%s http://[%s]/name.html", hostName, node.Ipv6) Eventually(func() (string, error) { return e2e.RunCommand(cmd) }, "5s", "1s").Should(ContainSubstring("ds-clusterip-pod"), "failed cmd: "+cmd) @@ -188,14 +151,14 @@ var _ = Describe("Verify DualStack Configuration", Ordered, func() { cmd := "kubectl get service ds-nodeport-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\"" nodeport, err := e2e.RunCommand(cmd) Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd) - nodeIPs, err := getNodeIPs(kubeConfigFile) + nodeIPs, err := e2e.GetNodeIPs(kubeConfigFile) Expect(err).NotTo(HaveOccurred()) for _, node := range nodeIPs { - cmd = "curl -L --insecure http://" + node.ipv4 + ":" + nodeport + "/name.html" + cmd = "curl -L --insecure http://" + node.Ipv4 + ":" + nodeport + "/name.html" Eventually(func() (string, error) { return e2e.RunCommand(cmd) }, "10s", "1s").Should(ContainSubstring("ds-nodeport-pod"), "failed cmd: "+cmd) - cmd = "curl -L --insecure http://[" + node.ipv6 + "]:" + nodeport + "/name.html" + cmd = "curl -L --insecure http://[" + node.Ipv6 + "]:" + nodeport + "/name.html" Eventually(func() (string, error) { return e2e.RunCommand(cmd) }, "10s", "1s").Should(ContainSubstring("ds-nodeport-pod"), "failed cmd: "+cmd) diff --git a/tests/e2e/mixedos/Vagrantfile b/tests/e2e/mixedos/Vagrantfile index bd176ef0fc..5a6273e982 100644 --- a/tests/e2e/mixedos/Vagrantfile +++ b/tests/e2e/mixedos/Vagrantfile @@ -19,7 +19,7 @@ def provision(vm, role, role_num, node_num) vm.network "private_network", ip: node_ip, netmask: "255.255.255.0" vagrant_defaults = '../vagrantdefaults.rb' - load vagrant_defaults if File.exists?(vagrant_defaults) + load vagrant_defaults if File.exist?(vagrant_defaults) defaultOSConfigure(vm) diff --git a/tests/e2e/mixedos/mixedos_test.go b/tests/e2e/mixedos/mixedos_test.go index b3eb859ac9..0c2a61f7b4 100644 --- a/tests/e2e/mixedos/mixedos_test.go +++ b/tests/e2e/mixedos/mixedos_test.go @@ -15,8 +15,8 @@ import ( // Valid nodeOS: generic/ubuntu2004, opensuse/Leap-15.3.x86_64 var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "operating system for linux nodes") -var serverCount = flag.Int("serverCount", 3, "number of server nodes") -var linuxAgentCount = flag.Int("linuxAgentCount", 0, 
"number of linux agent nodes") +var serverCount = flag.Int("serverCount", 1, "number of server nodes") +var linuxAgentCount = flag.Int("linuxAgentCount", 1, "number of linux agent nodes") var windowsAgentCount = flag.Int("windowsAgentCount", 1, "number of windows agent nodes") var ci = flag.Bool("ci", false, "running on CI") @@ -40,7 +40,7 @@ func createMixedCluster(nodeOS string, serverCount, linuxAgentCount, windowsAgen } windowsAgentNames := []string{} for i := 0; i < linuxAgentCount; i++ { - windowsAgentNames = append(windowsAgentNames, "linux-agent-"+strconv.Itoa(i)) + windowsAgentNames = append(windowsAgentNames, "windows-agent-"+strconv.Itoa(i)) } nodeRoles := strings.Join(serverNodeNames, " ") + " " + strings.Join(linuxAgentNames, " ") + " " + strings.Join(windowsAgentNames, " ") nodeRoles = strings.TrimSpace(nodeRoles) @@ -81,7 +81,8 @@ var _ = Describe("Verify Basic Cluster Creation", Ordered, func() { fmt.Println("CLUSTER CONFIG") fmt.Println("OS:", *nodeOS) fmt.Println("Server Nodes:", serverNodeNames) - fmt.Println("Agent Nodes:", linuxAgentNames) + fmt.Println("Linux Agent Nodes:", linuxAgentNames) + fmt.Println("Windows Agent Nodes:", windowsAgentNames) kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0]) Expect(err).NotTo(HaveOccurred()) }) @@ -123,13 +124,13 @@ var _ = Describe("Verify Basic Cluster Creation", Ordered, func() { // Wait for the pod_client pods to have an IP Eventually(func() string { ips, _ := e2e.PodIPsUsingLabel(kubeConfigFile, "app=client") - return ips[0] + return ips[0].Ipv4 }, "120s", "10s").Should(ContainSubstring("10.42"), "failed getClientIPs") // Wait for the windows_app_deployment pods to have an IP (We must wait 250s because it takes time) Eventually(func() string { ips, _ := e2e.PodIPsUsingLabel(kubeConfigFile, "app=windows-app") - return ips[0] + return ips[0].Ipv4 }, "620s", "10s").Should(ContainSubstring("10.42"), "failed getClientIPs") // Test Linux -> Windows communication diff --git a/tests/e2e/mixedosbgp/Vagrantfile b/tests/e2e/mixedosbgp/Vagrantfile index 61ba3da434..d9bdcc07eb 100644 --- a/tests/e2e/mixedosbgp/Vagrantfile +++ b/tests/e2e/mixedosbgp/Vagrantfile @@ -19,7 +19,7 @@ def provision(vm, role, role_num, node_num) vm.network "private_network", ip: node_ip, netmask: "255.255.255.0" vagrant_defaults = '../vagrantdefaults.rb' - load vagrant_defaults if File.exists?(vagrant_defaults) + load vagrant_defaults if File.exist?(vagrant_defaults) defaultOSConfigure(vm) diff --git a/tests/e2e/mixedosbgp/mixedosbgp_test.go b/tests/e2e/mixedosbgp/mixedosbgp_test.go index 06de9eff1e..a64bacdce6 100644 --- a/tests/e2e/mixedosbgp/mixedosbgp_test.go +++ b/tests/e2e/mixedosbgp/mixedosbgp_test.go @@ -130,13 +130,13 @@ var _ = Describe("Verify Basic Cluster Creation", Ordered, func() { // Wait for the pod_client pods to have an IP Eventually(func() string { ips, _ := e2e.PodIPsUsingLabel(kubeConfigFile, "app=client") - return ips[0] + return ips[0].Ipv4 }, "120s", "10s").Should(ContainSubstring("10.42"), "failed getClientIPs") // Wait for the windows_app_deployment pods to have an IP (We must wait 250s because it takes time) Eventually(func() string { ips, _ := e2e.PodIPsUsingLabel(kubeConfigFile, "app=windows-app") - return ips[0] + return ips[0].Ipv4 }, "620s", "10s").Should(ContainSubstring("10.42"), "failed getClientIPs") // Verify there are BGP routes diff --git a/tests/e2e/multus/Vagrantfile b/tests/e2e/multus/Vagrantfile index 055951fe4a..a726475248 100644 --- a/tests/e2e/multus/Vagrantfile +++ b/tests/e2e/multus/Vagrantfile @@ 
-26,12 +26,12 @@ def provision(vm, roles, role_num, node_num) :libvirt__ipv6_address => "#{NETWORK6_PREFIX}::1", :libvirt__ipv6_prefix => "64" - vagrant_defaults = File.exists?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb" + vagrant_defaults = File.exist?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb" load vagrant_defaults defaultOSConfigure(vm) - scripts_location = Dir.exists?("./scripts") ? "./scripts" : "../scripts" + scripts_location = Dir.exist?("./scripts") ? "./scripts" : "../scripts" vm.provision "IPv6 Setup", type: "shell", path: scripts_location + "/ipv6.sh", args: [node_ip4, node_ip6, node_ip6_gw, CNI, vm.box] install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH) @@ -83,10 +83,7 @@ Vagrant.configure("2") do |config| NODE_BOXES = NODE_BOXES.split(" ", -1) end - # Must iterate on the index, vagrant does not understand iterating - # over the node roles themselves - NODE_ROLES.length.times do |i| - name = NODE_ROLES[i] + NODE_ROLES.each_with_index do |name, i| config.vm.define name do |node| roles = name.split("-", -1) role_num = roles.pop.to_i diff --git a/tests/e2e/scripts/cilium_nokubeproxy.sh b/tests/e2e/scripts/cilium_nokubeproxy.sh new file mode 100644 index 0000000000..d6bbd5a93c --- /dev/null +++ b/tests/e2e/scripts/cilium_nokubeproxy.sh @@ -0,0 +1,25 @@ +#!/bin/bash +ip4_addr=$1 + +# Set Cilium parameters to get as much BPF as possible and, as a consequence, +# as few iptables rules as possible +mkdir -p /var/lib/rancher/rke2/server/manifests + +echo "Creating cilium chart" +echo "apiVersion: helm.cattle.io/v1 +kind: HelmChartConfig +metadata: + name: rke2-cilium + namespace: kube-system +spec: + valuesContent: |- + ipv6: + enabled: true + devices: eth1 + kubeProxyReplacement: true + k8sServiceHost: $ip4_addr + k8sServicePort: 6443 + cni: + chainingMode: none + bpf: + masquerade: true" > /var/lib/rancher/rke2/server/manifests/e2e-cilium.yaml \ No newline at end of file diff --git a/tests/e2e/secretsencryption/Vagrantfile b/tests/e2e/secretsencryption/Vagrantfile index f0f7e749d1..c01a9a9d2a 100644 --- a/tests/e2e/secretsencryption/Vagrantfile +++ b/tests/e2e/secretsencryption/Vagrantfile @@ -17,8 +17,8 @@ def provision(vm, roles, role_num, node_num) node_ip = "#{NETWORK_PREFIX}.#{100+node_num}" vm.network "private_network", ip: node_ip, netmask: "255.255.255.0" - scripts_location = Dir.exists?("./scripts") ? "./scripts" : "../scripts" - vagrant_defaults = File.exists?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb" + scripts_location = Dir.exist?("./scripts") ? "./scripts" : "../scripts" + vagrant_defaults = File.exist?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb" load vagrant_defaults defaultOSConfigure(vm) diff --git a/tests/e2e/secretsencryption_old/Vagrantfile b/tests/e2e/secretsencryption_old/Vagrantfile index f0f7e749d1..c01a9a9d2a 100644 --- a/tests/e2e/secretsencryption_old/Vagrantfile +++ b/tests/e2e/secretsencryption_old/Vagrantfile @@ -17,8 +17,8 @@ def provision(vm, roles, role_num, node_num) node_ip = "#{NETWORK_PREFIX}.#{100+node_num}" vm.network "private_network", ip: node_ip, netmask: "255.255.255.0" - scripts_location = Dir.exists?("./scripts") ? "./scripts" : "../scripts" - vagrant_defaults = File.exists?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb" + scripts_location = Dir.exist?("./scripts") ? "./scripts" : "../scripts" + vagrant_defaults = File.exist?("./vagrantdefaults.rb") ? 
"./vagrantdefaults.rb" : "../vagrantdefaults.rb" load vagrant_defaults defaultOSConfigure(vm) diff --git a/tests/e2e/splitserver/Vagrantfile b/tests/e2e/splitserver/Vagrantfile index 647615382a..dcc8de0a6d 100644 --- a/tests/e2e/splitserver/Vagrantfile +++ b/tests/e2e/splitserver/Vagrantfile @@ -18,8 +18,8 @@ def provision(vm, role, role_num, node_num) node_ip = "#{NETWORK_PREFIX}.#{100+node_num}" vm.network "private_network", ip: node_ip, netmask: "255.255.255.0" - scripts_location = Dir.exists?("./scripts") ? "./scripts" : "../scripts" - vagrant_defaults = File.exists?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb" + scripts_location = Dir.exist?("./scripts") ? "./scripts" : "../scripts" + vagrant_defaults = File.exist?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb" load vagrant_defaults defaultOSConfigure(vm) diff --git a/tests/e2e/testutils.go b/tests/e2e/testutils.go index becad2c9e5..628577e940 100644 --- a/tests/e2e/testutils.go +++ b/tests/e2e/testutils.go @@ -41,6 +41,12 @@ type NodeError struct { Err error } +type objIP struct { + Name string + Ipv4 string + Ipv6 string +} + func (ne *NodeError) Error() string { return fmt.Sprintf("failed creating cluster: %s: %v", ne.Cmd, ne.Err) } @@ -484,13 +490,43 @@ func UpgradeCluster(serverNodenames []string, agentNodenames []string) error { return nil } -// PodIPsUsingLabel returns the IPs of the pods with a label (only single-stack supported) -func PodIPsUsingLabel(kubeConfigFile string, label string) ([]string, error) { - cmd := `kubectl get pods -l ` + label + ` -o=jsonpath='{range .items[*]}{.status.podIPs[*].ip}{" "}{end}' --kubeconfig=` + kubeConfigFile +// PodIPsUsingLabel returns the IPs of the pods with a label +func PodIPsUsingLabel(kubeConfigFile string, label string) ([]objIP, error) { + cmd := `kubectl get pods -l ` + label + ` -o=jsonpath='{range .items[*]}{.metadata.name}{" "}{.status.podIPs[*].ip}{"\n"}{end}' --kubeconfig=` + kubeConfigFile + return getObjIPs(cmd) +} + +// GetPodIPs returns the IPs of all the pods +func GetPodIPs(kubeConfigFile string) ([]objIP, error) { + cmd := `kubectl get pods -A -o=jsonpath='{range .items[*]}{.metadata.name}{" "}{.status.podIPs[*].ip}{"\n"}{end}' --kubeconfig=` + kubeConfigFile + return getObjIPs(cmd) +} + +// GetNodeIPs returns the IPs of the nodes +func GetNodeIPs(kubeConfigFile string) ([]objIP, error) { + cmd := `kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name}{" "}{.status.addresses[?(@.type == "ExternalIP")].address}{"\n"}{end}' --kubeconfig=` + kubeConfigFile + return getObjIPs(cmd) +} + +// getObjIPs processes the IPs of the requested objects +func getObjIPs(cmd string) ([]objIP, error) { + var objIPs []objIP res, err := RunCommand(cmd) if err != nil { return nil, err } - - return strings.Split(res, " "), nil + objs := strings.Split(res, "\n") + objs = objs[:len(objs)-1] + + for _, obj := range objs { + fields := strings.Fields(obj) + if len(fields) > 2 { + objIPs = append(objIPs, objIP{Name: fields[0], Ipv4: fields[1], Ipv6: fields[2]}) + } else if len(fields) > 1 { + objIPs = append(objIPs, objIP{Name: fields[0], Ipv4: fields[1]}) + } else { + objIPs = append(objIPs, objIP{Name: fields[0]}) + } + } + return objIPs, nil } diff --git a/tests/e2e/upgradecluster/Vagrantfile b/tests/e2e/upgradecluster/Vagrantfile index a4d3489236..bd269f4b77 100644 --- a/tests/e2e/upgradecluster/Vagrantfile +++ b/tests/e2e/upgradecluster/Vagrantfile @@ -19,9 +19,9 @@ def provision(vm, roles, role_num, node_num) node_ip = 
"#{NETWORK_PREFIX}.#{100+node_num}" vm.network "private_network", ip: node_ip, netmask: "255.255.255.0" - scripts_location = Dir.exists?("./scripts/") ? "./scripts/" : "../scripts/" + scripts_location = Dir.exist?("./scripts/") ? "./scripts/" : "../scripts/" vagrant_defaults = '../vagrantdefaults.rb' - load vagrant_defaults if File.exists?(vagrant_defaults) + load vagrant_defaults if File.exist?(vagrant_defaults) defaultOSConfigure(vm) diff --git a/tests/e2e/vagrantdefaults.rb b/tests/e2e/vagrantdefaults.rb index 8fee28a527..3f5ad61cee 100644 --- a/tests/e2e/vagrantdefaults.rb +++ b/tests/e2e/vagrantdefaults.rb @@ -17,7 +17,7 @@ def getInstallType(vm, version, branch) return "INSTALL_RKE2_VERSION=#{version}" end # Grabs the last 5 commit SHA's from the given branch, then purges any commits that do not have a passing CI build - scripts_location = Dir.exists?("./scripts") ? "./scripts" : "../scripts" + scripts_location = Dir.exist?("./scripts") ? "./scripts" : "../scripts" vm.provision "shell", path: scripts_location + "/latest_commit.sh", args: [branch, "/tmp/rke2_commits"] return "INSTALL_RKE2_COMMIT=$(head\ -n\ 1\ /tmp/rke2_commits)" end @@ -32,4 +32,4 @@ def loadManifests(vm, files) files.each do |file| vm.provision "file", source: file, destination: "/var/lib/rancher/rke2/server/manifests/#{File.basename(file)}" end -end \ No newline at end of file +end diff --git a/tests/e2e/validatecluster/Vagrantfile b/tests/e2e/validatecluster/Vagrantfile index 7227728c8b..cc3374c73e 100644 --- a/tests/e2e/validatecluster/Vagrantfile +++ b/tests/e2e/validatecluster/Vagrantfile @@ -20,8 +20,8 @@ def provision(vm, roles, role_num, node_num) node_ip = "#{NETWORK_PREFIX}.#{100+node_num}" vm.network "private_network", ip: node_ip, netmask: "255.255.255.0" - scripts_location = Dir.exists?("./scripts") ? "./scripts" : "../scripts" - vagrant_defaults = File.exists?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb" + scripts_location = Dir.exist?("./scripts") ? "./scripts" : "../scripts" + vagrant_defaults = File.exist?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb" load vagrant_defaults defaultOSConfigure(vm)