Add cilium no proxy e2e test (#5885) (#5967)
* Add cilium no kube-proxy e2e test

* Replace the old index-based node loop with each_with_index

* Replace the deprecated exists? method with exist?

* Remove the kubelet-arg workaround as it is no longer needed

---------

Signed-off-by: Manuel Buil <[email protected]>
manuelbuil authored May 27, 2024
1 parent 1290848 commit 27e72f7
Showing 18 changed files with 426 additions and 90 deletions.
94 changes: 94 additions & 0 deletions tests/e2e/ciliumnokp/Vagrantfile
@@ -0,0 +1,94 @@
ENV['VAGRANT_NO_PARALLEL'] = 'no'
NODE_ROLES = (ENV['E2E_NODE_ROLES'] || ["server-0", "agent-0" ])
NODE_BOXES = (ENV['E2E_NODE_BOXES'] || ['generic/ubuntu2310', 'generic/ubuntu2310'])
GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 3072).to_i
NETWORK4_PREFIX = "10.10.10"
NETWORK6_PREFIX = "fd11:decf:c0ff:ee"
install_type = ""

def provision(vm, roles, role_num, node_num)
  vm.box = NODE_BOXES[node_num]
  vm.hostname = "#{roles[0]}-#{role_num}"
  node_ip4 = "#{NETWORK4_PREFIX}.#{100+node_num}"
  node_ip6 = "#{NETWORK6_PREFIX}::#{10+node_num}"
  node_ip6_gw = "#{NETWORK6_PREFIX}::1"
  # Only works with libvirt, which allows IPv4 + IPv6 on a single network/interface
  vm.network "private_network",
    :ip => node_ip4,
    :netmask => "255.255.255.0",
    :libvirt__dhcp_enabled => false,
    :libvirt__forward_mode => "none",
    :libvirt__guest_ipv6 => "yes",
    :libvirt__ipv6_address => "#{NETWORK6_PREFIX}::1",
    :libvirt__ipv6_prefix => "64"

  vagrant_defaults = File.exist?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb"
  load vagrant_defaults

  defaultOSConfigure(vm)

  scripts_location = Dir.exist?("./scripts") ? "./scripts" : "../scripts"
  vm.provision "IPv6 Setup", type: "shell", path: scripts_location + "/ipv6.sh", args: [node_ip4, node_ip6, node_ip6_gw, "cilium", vm.box]

  install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
  vm.provision "Ping Check", type: "shell", inline: "ping -4 -c 2 rke2.io"

  if roles.include?("server") && role_num == 0
    vm.provision "Create Cilium Manifest", type: "shell", path: "../scripts/cilium_nokubeproxy.sh", args: [ "#{NETWORK4_PREFIX}.100" ]
    vm.provision :rke2, run: 'once' do |rke2|
      rke2.env = %W[INSTALL_RKE2_TYPE=server #{install_type}]
      rke2.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
      rke2.config = <<~YAML
        write-kubeconfig-mode: '0644'
        node-external-ip: #{node_ip4},#{node_ip6}
        node-ip: #{node_ip4},#{node_ip6}
        token: vagrant-rke2
        cluster-cidr: 10.42.0.0/16,2001:cafe:42:0::/56
        service-cidr: 10.43.0.0/16,2001:cafe:43:0::/112
        bind-address: #{NETWORK4_PREFIX}.100
        cni: cilium
        disable-kube-proxy: true
      YAML
    end
  end
  if roles.include?("agent")
    vm.provision :rke2, run: 'once' do |rke2|
      rke2.env = %W[INSTALL_RKE2_TYPE=agent #{install_type}]
      rke2.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
      rke2.install_path = false
      rke2.config = <<~YAML
        write-kubeconfig-mode: '0644'
        server: https://#{NETWORK4_PREFIX}.100:9345
        node-ip: #{node_ip4},#{node_ip6}
        node-external-ip: #{node_ip4},#{node_ip6}
        token: vagrant-rke2
      YAML
    end
  end
end

Vagrant.configure("2") do |config|
  config.vagrant.plugins = ["vagrant-rke2", "vagrant-reload", "vagrant-libvirt"]
  config.vm.provider "libvirt" do |v|
    v.cpus = NODE_CPUS
    v.memory = NODE_MEMORY
  end

  if NODE_ROLES.kind_of?(String)
    NODE_ROLES = NODE_ROLES.split(" ", -1)
  end
  if NODE_BOXES.kind_of?(String)
    NODE_BOXES = NODE_BOXES.split(" ", -1)
  end

  NODE_ROLES.each_with_index do |name, i|
    config.vm.define name do |node|
      roles = name.split("-", -1)
      role_num = roles.pop.to_i
      provision(node.vm, roles, role_num, i)
    end
  end
end
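Note on the "Create Cilium Manifest" step above: the provisioner runs tests/e2e/scripts/cilium_nokubeproxy.sh with the server's IPv4 address, but that script is not among the files shown in this commit view. As a rough sketch only — assuming it follows the usual rke2-cilium HelmChartConfig pattern for kube-proxy replacement, with the exact chart values unverified here — the manifest it drops into /var/lib/rancher/rke2/server/manifests/ would look something like this:

apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:
  name: rke2-cilium
  namespace: kube-system
spec:
  valuesContent: |-
    # Assumed values: let Cilium's eBPF datapath take over Service handling,
    # since RKE2 is started with disable-kube-proxy: true.
    kubeProxyReplacement: true
    k8sServiceHost: 10.10.10.100   # the IP passed to the script (server bind-address)
    k8sServicePort: 6443

With kube-proxy disabled, the Cilium agent must be told how to reach the API server directly, which is why the script is handed the server address as its only argument.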
225 changes: 225 additions & 0 deletions tests/e2e/ciliumnokp/ciliumnokp_test.go
@@ -0,0 +1,225 @@
package ciliumnokp

import (
	"flag"
	"fmt"

	"os"
	"strings"
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	"github.com/rancher/rke2/tests/e2e"
)

var nodeOS = flag.String("nodeOS", "generic/ubuntu2310", "VM operating system")
var serverCount = flag.Int("serverCount", 1, "number of server nodes")
var agentCount = flag.Int("agentCount", 1, "number of agent nodes")
var ci = flag.Bool("ci", false, "running on CI")

func Test_E2ECiliumNoKP(t *testing.T) {
	flag.Parse()
	RegisterFailHandler(Fail)
	suiteConfig, reporterConfig := GinkgoConfiguration()
	RunSpecs(t, "Validate dualstack in Cilium without kube-proxy Test Suite", suiteConfig, reporterConfig)
}

var (
	kubeConfigFile  string
	serverNodeNames []string
	agentNodeNames  []string
)
var _ = ReportAfterEach(e2e.GenReport)

var _ = Describe("Verify DualStack in Cilium without kube-proxy configuration", Ordered, func() {

	It("Starts up with no issues", func() {
		var err error
		serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
		Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
		fmt.Println("CLUSTER CONFIG")
		fmt.Println("OS:", *nodeOS)
		fmt.Println("Server Nodes:", serverNodeNames)
		fmt.Println("Agent Nodes:", agentNodeNames)
		kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
		Expect(err).NotTo(HaveOccurred())
	})

	It("Checks Node Status", func() {
		Eventually(func(g Gomega) {
			nodes, err := e2e.ParseNodes(kubeConfigFile, false)
			g.Expect(err).NotTo(HaveOccurred())
			for _, node := range nodes {
				g.Expect(node.Status).Should(Equal("Ready"))
			}
		}, "620s", "5s").Should(Succeed())
		_, err := e2e.ParseNodes(kubeConfigFile, true)
		Expect(err).NotTo(HaveOccurred())
	})

	It("Checks Pod Status", func() {
		Eventually(func(g Gomega) {
			pods, err := e2e.ParsePods(kubeConfigFile, false)
			g.Expect(err).NotTo(HaveOccurred())
			for _, pod := range pods {
				if strings.Contains(pod.Name, "helm-install") {
					g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
				} else {
					g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
				}
			}
		}, "420s", "5s").Should(Succeed())
		_, err := e2e.ParsePods(kubeConfigFile, true)
		Expect(err).NotTo(HaveOccurred())
	})

	It("Verifies that each node has IPv4 and IPv6", func() {
		for _, node := range serverNodeNames {
			cmd := fmt.Sprintf("kubectl get node %s -o jsonpath='{.status.addresses}' --kubeconfig=%s | jq '.[] | select(.type == \"ExternalIP\") | .address'",
				node, kubeConfigFile)
			res, err := e2e.RunCommand(cmd)
			Expect(err).NotTo(HaveOccurred(), res)
			Expect(res).Should(ContainSubstring("10.10.10"))
			Expect(res).Should(ContainSubstring("fd11:decf:c0ff"))
		}
	})

	It("Verifies that cilium config is correct", func() {
		cmdCiliumAgents := "kubectl get pods -l app.kubernetes.io/name=cilium-agent -n kube-system -o=name --kubeconfig=" + kubeConfigFile
		res, err := e2e.RunCommand(cmdCiliumAgents)
		Expect(err).NotTo(HaveOccurred(), res)
		ciliumAgents := strings.Split(strings.TrimSpace(res), "\n")
		Expect(len(ciliumAgents)).Should(Equal(len(serverNodeNames) + len(agentNodeNames)))
		for _, ciliumAgent := range ciliumAgents {
			cmd := "kubectl exec " + ciliumAgent + " -n kube-system -c cilium-agent --kubeconfig=" + kubeConfigFile + " -- cilium-dbg status --verbose | grep -e 'BPF' -e 'HostPort' -e 'LoadBalancer'"
			res, err := e2e.RunCommand(cmd)
			Expect(err).NotTo(HaveOccurred(), res)
			// We expect the following output and the important parts are HostPort, LoadBalancer, Host Routing and Masquerading
			// Host Routing: BPF
			// Masquerading: BPF
			// Clock Source for BPF: ktime
			// - LoadBalancer: Enabled
			// - HostPort: Enabled
			// BPF Maps: dynamic sizing: on (ratio: 0.002500)
			Expect(res).Should(ContainSubstring("Host Routing"))
			Expect(res).Should(ContainSubstring("Masquerading"))
			Expect(res).Should(ContainSubstring("LoadBalancer: Enabled"))
			Expect(res).Should(ContainSubstring("HostPort: Enabled"))
		}
	})

	It("Verifies ClusterIP Service", func() {
		_, err := e2e.DeployWorkload("dualstack_clusterip.yaml", kubeConfigFile)
		Expect(err).NotTo(HaveOccurred())
		Eventually(func() (string, error) {
			cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
			return e2e.RunCommand(cmd)
		}, "120s", "5s").Should(ContainSubstring("ds-clusterip-pod"))

		// Checks both IPv4 and IPv6
		clusterips, err := e2e.FetchClusterIP(kubeConfigFile, "ds-clusterip-svc", true)
		Expect(err).NotTo(HaveOccurred())
		for _, ip := range strings.Split(clusterips, ",") {
			if strings.Contains(ip, "::") {
				ip = "[" + ip + "]"
			}
			pods, err := e2e.ParsePods(kubeConfigFile, false)
			Expect(err).NotTo(HaveOccurred())
			for _, pod := range pods {
				if !strings.HasPrefix(pod.Name, "ds-clusterip-pod") {
					continue
				}
				cmd := fmt.Sprintf("curl -L --insecure http://%s", ip)
				Eventually(func() (string, error) {
					return e2e.RunCmdOnNode(cmd, serverNodeNames[0])
				}, "60s", "5s").Should(ContainSubstring("Welcome to nginx!"), "failed cmd: "+cmd)
			}
		}
	})

	It("Verifies internode connectivity", func() {
		_, err := e2e.DeployWorkload("pod_client.yaml", kubeConfigFile)
		Expect(err).NotTo(HaveOccurred())

		// Wait for the pod_client to have an IP
		Eventually(func() string {
			ips, _ := e2e.PodIPsUsingLabel(kubeConfigFile, "app=client")
			return ips[0].Ipv4
		}, "40s", "5s").Should(ContainSubstring("10.42"), "failed getClientIPs")

		clientIPs, err := e2e.PodIPsUsingLabel(kubeConfigFile, "app=client")
		Expect(err).NotTo(HaveOccurred())
		for _, ip := range clientIPs {
			cmd := "kubectl exec svc/client-curl --kubeconfig=" + kubeConfigFile + " -- curl -m7 " + ip.Ipv4 + "/name.html"
			Eventually(func() (string, error) {
				return e2e.RunCommand(cmd)
			}, "20s", "3s").Should(ContainSubstring("client-deployment"), "failed cmd: "+cmd)
		}
	})

	It("Verifies Ingress", func() {
		_, err := e2e.DeployWorkload("dualstack_ingress.yaml", kubeConfigFile)
		Expect(err).NotTo(HaveOccurred(), "Ingress manifest not deployed")
		cmd := "kubectl get ingress ds-ingress --kubeconfig=" + kubeConfigFile + " -o jsonpath=\"{.spec.rules[*].host}\""
		hostName, err := e2e.RunCommand(cmd)
		Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd)
		nodeIPs, err := e2e.GetNodeIPs(kubeConfigFile)
		Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd)
		for _, node := range nodeIPs {
			cmd := fmt.Sprintf("curl --header host:%s http://%s/name.html", hostName, node.Ipv4)
			Eventually(func() (string, error) {
				return e2e.RunCommand(cmd)
			}, "30s", "2s").Should(ContainSubstring("ds-clusterip-pod"), "failed cmd: "+cmd)
			cmd = fmt.Sprintf("curl --header host:%s http://[%s]/name.html", hostName, node.Ipv6)
			Eventually(func() (string, error) {
				return e2e.RunCommand(cmd)
			}, "10s", "1s").Should(ContainSubstring("ds-clusterip-pod"), "failed cmd: "+cmd)
		}
	})

	It("Verifies NodePort Service", func() {
		_, err := e2e.DeployWorkload("dualstack_nodeport.yaml", kubeConfigFile)
		Expect(err).NotTo(HaveOccurred())
		cmd := "kubectl get service ds-nodeport-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\""
		nodeport, err := e2e.RunCommand(cmd)
		Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd)
		nodeIPs, err := e2e.GetNodeIPs(kubeConfigFile)
		Expect(err).NotTo(HaveOccurred())
		for _, node := range nodeIPs {
			cmd = "curl -L --insecure http://" + node.Ipv4 + ":" + nodeport + "/name.html"
			Eventually(func() (string, error) {
				return e2e.RunCommand(cmd)
			}, "30s", "1s").Should(ContainSubstring("ds-nodeport-pod"), "failed cmd: "+cmd)
			cmd = "curl -L --insecure http://[" + node.Ipv6 + "]:" + nodeport + "/name.html"
			Eventually(func() (string, error) {
				return e2e.RunCommand(cmd)
			}, "10s", "1s").Should(ContainSubstring("ds-nodeport-pod"), "failed cmd: "+cmd)
		}
	})

	It("Verifies there are no required iptables", func() {
		// Check that there are no iptables rules with KUBE-SVC and HOSTPORT
		cmdiptables := "sudo iptables-save | grep -e 'KUBE-SVC' -e 'HOSTPORT' | wc -l"
		for i := range serverNodeNames {
			res, err := e2e.RunCmdOnNode(cmdiptables, serverNodeNames[i])
			Expect(err).NotTo(HaveOccurred(), res)
			Expect(res).Should(ContainSubstring("0"))
		}
	})

})

var failed bool
var _ = AfterEach(func() {
	failed = failed || CurrentSpecReport().Failed()
})

var _ = AfterSuite(func() {
	if failed && !*ci {
		fmt.Println("FAILED!")
	} else {
		Expect(e2e.DestroyCluster()).To(Succeed())
		Expect(os.Remove(kubeConfigFile)).To(Succeed())
	}
})
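The ClusterIP, Ingress and NodePort specs above deploy shared manifests (dualstack_clusterip.yaml, dualstack_ingress.yaml, dualstack_nodeport.yaml) that live with the common e2e resources rather than in this commit. As an illustration only — the object names and labels below are taken from what the test queries, while the rest of the spec is an assumption — dualstack_clusterip.yaml would be shaped roughly like this:

apiVersion: v1
kind: Service
metadata:
  name: ds-clusterip-svc
spec:
  type: ClusterIP
  ipFamilyPolicy: RequireDualStack   # assumed; the test expects one IPv4 and one IPv6 cluster IP
  ipFamilies:
    - IPv4
    - IPv6
  selector:
    k8s-app: nginx-app-clusterip
  ports:
    - port: 80
      targetPort: 80
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ds-clusterip-pod
spec:
  replicas: 2
  selector:
    matchLabels:
      k8s-app: nginx-app-clusterip
  template:
    metadata:
      labels:
        k8s-app: nginx-app-clusterip
    spec:
      containers:
        - name: nginx
          image: nginx:alpine   # assumed image; the test only checks the default nginx welcome page
          ports:
            - containerPort: 80

A dual-stack Service of this shape is what lets e2e.FetchClusterIP(kubeConfigFile, "ds-clusterip-svc", true) return both an IPv4 and an IPv6 cluster IP for the curl loop in the test.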
9 changes: 3 additions & 6 deletions tests/e2e/dnscache/Vagrantfile
@@ -26,12 +26,12 @@ def provision(vm, roles, role_num, node_num)
    :libvirt__ipv6_address => "#{NETWORK6_PREFIX}::1",
    :libvirt__ipv6_prefix => "64"

-  vagrant_defaults = File.exists?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb"
+  vagrant_defaults = File.exist?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb"
  load vagrant_defaults

  defaultOSConfigure(vm)

-  scripts_location = Dir.exists?("./scripts") ? "./scripts" : "../scripts"
+  scripts_location = Dir.exist?("./scripts") ? "./scripts" : "../scripts"
  vm.provision "IPv6 Setup", type: "shell", path: scripts_location + "/ipv6.sh", args: [node_ip4, node_ip6, node_ip6_gw, CNI, vm.box]

  install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
@@ -84,10 +84,7 @@ Vagrant.configure("2") do |config|
    NODE_BOXES = NODE_BOXES.split(" ", -1)
  end

-  # Must iterate on the index, vagrant does not understand iterating
-  # over the node roles themselves
-  NODE_ROLES.length.times do |i|
-    name = NODE_ROLES[i]
+  NODE_ROLES.each_with_index do |name, i|
    config.vm.define name do |node|
      roles = name.split("-", -1)
      role_num = roles.pop.to_i
6 changes: 2 additions & 4 deletions tests/e2e/dualstack/Vagrantfile
@@ -28,12 +28,12 @@ def provision(vm, roles, role_num, node_num)
    :libvirt__ipv6_address => "#{NETWORK6_PREFIX}::1",
    :libvirt__ipv6_prefix => "64"

-  vagrant_defaults = File.exists?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb"
+  vagrant_defaults = File.exist?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb"
  load vagrant_defaults

  defaultOSConfigure(vm)

-  scripts_location = Dir.exists?("./scripts") ? "./scripts" : "../scripts"
+  scripts_location = Dir.exist?("./scripts") ? "./scripts" : "../scripts"
  vm.provision "IPv6 Setup", type: "shell", path: scripts_location + "/ipv6.sh", args: [node_ip4, node_ip6, node_ip6_gw, CNI, vm.box]

  install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
@@ -68,7 +68,6 @@ def provision(vm, roles, role_num, node_num)
        cluster-cidr: 10.42.0.0/16,2001:cafe:42:0::/56
        service-cidr: 10.43.0.0/16,2001:cafe:43:0::/112
        cni: #{CNI}
-        kubelet-arg: "--node-ip=0.0.0.0" # Workaround for https://github.com/kubernetes/kubernetes/issues/111695
      YAML
    end
  end
@@ -83,7 +82,6 @@ def provision(vm, roles, role_num, node_num)
        node-ip: #{node_ip4},#{node_ip6}
        server: https://#{NETWORK4_PREFIX}.100:9345
        token: vagrant-rke2
-        kubelet-arg: "--node-ip=0.0.0.0" # Workaround for https://github.com/kubernetes/kubernetes/issues/111695
      YAML
    end
  end