Add e2e test about mixedos+flannel
Signed-off-by: Manuel Buil <[email protected]>
manuelbuil committed May 30, 2024
1 parent 71d00cc commit 7d981ab
Showing 2 changed files with 317 additions and 0 deletions.
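As with the other e2e suites under tests/e2e, the new package is presumably driven through `go test` using the flags declared in mixedosbgp_test.go, with the E2E_* variables read by the Vagrantfile exported beforehand; the timeout and -run pattern below are illustrative assumptions rather than part of this commit:

  go test -timeout=2h ./tests/e2e/mixedosflannel/... -run E2EMixedOSFlannel -serverCount=1 -linuxAgentCount=1 -windowsAgentCount=1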
130 changes: 130 additions & 0 deletions tests/e2e/mixedosflannel/Vagrantfile
@@ -0,0 +1,130 @@
ENV['VAGRANT_NO_PARALLEL'] = 'no'
NODE_ROLES = (ENV['E2E_NODE_ROLES'] ||
  ["server-0", "linux-agent-0", "windows-agent-0"])
NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
  ['generic/ubuntu2004', 'generic/ubuntu2004', 'jborean93/WindowsServer2022'])
GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 3072).to_i
# Virtualbox >= 6.1.28 requires `/etc/vbox/network.conf` for expanded private networks
NETWORK_PREFIX = "10.10.10"
install_type = ""
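# install_type is filled in per node by provision() below and passed to the vagrant-rke2 provisioner env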

def provision(vm, role, role_num, node_num)
  vm.box = NODE_BOXES[node_num]
  vm.hostname = role
  # An expanded netmask is required to allow VM<-->VM communication; virtualbox defaults to /32
  node_ip = "#{NETWORK_PREFIX}.#{100+node_num}"
  vm.network "private_network", ip: node_ip, netmask: "255.255.255.0"

  vagrant_defaults = '../vagrantdefaults.rb'
  load vagrant_defaults if File.exist?(vagrant_defaults)

  defaultOSConfigure(vm)

  if role.include?("windows")
    if !RELEASE_VERSION.empty?
      install_type = "Version=#{RELEASE_VERSION}"
    else
      vm.provision "shell", path: "../scripts/latest_commit.ps1", args: [GITHUB_BRANCH, "./rke2_commits.txt"]
      install_type = "Commit=(Get-Content -TotalCount 1 ./rke2_commits.txt)"
    end
  else
    if !RELEASE_VERSION.empty?
      install_type = "INSTALL_RKE2_VERSION=#{RELEASE_VERSION}"
    else
      # Grabs the last 5 commit SHAs from the given branch, then purges any commits that do not have a passing CI build
      vm.provision "shell", path: "../scripts/latest_commit.sh", args: [GITHUB_BRANCH, "/tmp/rke2_commits"]
      install_type = "INSTALL_RKE2_COMMIT=$(head\ -n\ 1\ /tmp/rke2_commits)"
    end
    vm.provision "shell", inline: "ping -c 2 rke2.io"
    vm.provision "Create Calico Manifest", type: "shell", path: "../scripts/calico_manifestbgp.sh", args: [ "#{NETWORK_PREFIX}.1" ]
  end

  if role.include?("server") && role_num == 0
    vm.provision :rke2, run: 'once' do |rke2|
      rke2.env = %W[INSTALL_RKE2_TYPE=server #{install_type}]
      rke2.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
      rke2.config = <<~YAML
        write-kubeconfig-mode: '0644'
        node-external-ip: #{NETWORK_PREFIX}.100
        node-ip: #{NETWORK_PREFIX}.100
        token: vagrant-rke2
        cni: flannel
      YAML
    end
    vm.provision "Install sonobuoy", type: "shell", path: "../scripts/install_sonobuoy.sh"
  end
  if role.include?("linux-agent")
    vm.provision :rke2, run: 'once' do |rke2|
      rke2.env = %W[INSTALL_RKE2_TYPE=agent #{install_type}]
      rke2.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
      rke2.install_path = false
      rke2.config = <<~YAML
        write-kubeconfig-mode: '0644'
        node-external-ip: #{node_ip}
        node-ip: #{node_ip}
        server: https://#{NETWORK_PREFIX}.100:9345
        token: vagrant-rke2
      YAML
    end
  end
  if role.include?("windows-agent")
    if !vm.box.match?(/Windows.*2022/) && !vm.box.match?(/Windows.*2019/)
      puts "invalid box: " + vm.box + " found for windows agent"
      abort
    end

    # For Windows GUI on virtualbox
    # vm.provider "virtualbox" do |v|
    #   v.gui = true
    #   v.customize ["modifyvm", :id, "--vram", 128]
    #   v.customize ["modifyvm", :id, "--clipboard", "bidirectional"]
    #   v.customize ["modifyvm", :id, "--accelerate3d", "on"]
    #   v.customize ["modifyvm", :id, "--accelerate2dvideo", "on"]
    # end
    # If using libvirt, use virt-viewer for the GUI after bringing up the node
    vm.provision :rke2, run: 'once' do |rke2|
      rke2.env = [install_type]
      rke2.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
      # rke2.skip_start = true
      rke2.config = <<~YAML
        node-external-ip: #{node_ip}
        node-ip: #{node_ip}
        server: https://#{NETWORK_PREFIX}.100:9345
        token: vagrant-rke2
      YAML
    end
  end
end

Vagrant.configure("2") do |config|
  config.vagrant.plugins = ["vagrant-rke2", "vagrant-reload"]
  # For windows, just use the password, not the private key
  config.ssh.password = "vagrant"
  # Default provider is libvirt; virtualbox is only provided as a backup
  config.vm.provider "libvirt" do |v|
    v.cpus = NODE_CPUS
    v.memory = NODE_MEMORY
  end
  config.vm.provider "virtualbox" do |v|
    v.cpus = NODE_CPUS
    v.memory = NODE_MEMORY
    v.linked_clone = false
  end

  if NODE_ROLES.kind_of?(String)
    NODE_ROLES = NODE_ROLES.split(" ", -1)
  end
  if NODE_BOXES.kind_of?(String)
    NODE_BOXES = NODE_BOXES.split(" ", -1)
  end

  NODE_ROLES.each_with_index do |name, i|
    config.vm.define name do |node|
      role_num = name.split("-", -1).pop.to_i
      provision(node.vm, name, role_num, i)
    end
  end
end
187 changes: 187 additions & 0 deletions tests/e2e/mixedosflannel/mixedosbgp_test.go
@@ -0,0 +1,187 @@
package mixedosflannel

import (
	"flag"
	"fmt"
	"os"
	"strconv"
	"strings"
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	"github.com/rancher/rke2/tests/e2e"
)

// Valid nodeOS: generic/ubuntu2004, opensuse/Leap-15.3.x86_64
var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "operating system for linux nodes")
var serverCount = flag.Int("serverCount", 1, "number of server nodes")
var linuxAgentCount = flag.Int("linuxAgentCount", 1, "number of linux agent nodes")
var windowsAgentCount = flag.Int("windowsAgentCount", 1, "number of windows agent nodes")
var ci = flag.Bool("ci", false, "running on CI")

const defaultWindowsOS = "jborean93/WindowsServer2022"

func Test_E2EMixedOSFlannelValidation(t *testing.T) {
	flag.Parse()
	RegisterFailHandler(Fail)
	suiteConfig, reporterConfig := GinkgoConfiguration()
	RunSpecs(t, "Validate MixedOS Flannel Test Suite", suiteConfig, reporterConfig)
}

func createMixedCluster(nodeOS string, serverCount, linuxAgentCount, windowsAgentCount int) ([]string, []string, []string, error) {
	serverNodeNames := []string{}
	for i := 0; i < serverCount; i++ {
		serverNodeNames = append(serverNodeNames, "server-"+strconv.Itoa(i))
	}
	linuxAgentNames := []string{}
	for i := 0; i < linuxAgentCount; i++ {
		linuxAgentNames = append(linuxAgentNames, "linux-agent-"+strconv.Itoa(i))
	}
	windowsAgentNames := []string{}
	for i := 0; i < windowsAgentCount; i++ {
		windowsAgentNames = append(windowsAgentNames, "windows-agent-"+strconv.Itoa(i))
	}
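	// NODE_ROLES and NODE_BOXES are handed to the Vagrantfile as space-separated strings,
	// which it splits back into arrays.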
	nodeRoles := strings.Join(serverNodeNames, " ") + " " + strings.Join(linuxAgentNames, " ") + " " + strings.Join(windowsAgentNames, " ")
	nodeRoles = strings.TrimSpace(nodeRoles)
	nodeBoxes := strings.Repeat(nodeOS+" ", serverCount+linuxAgentCount)
	nodeBoxes += strings.Repeat(defaultWindowsOS+" ", windowsAgentCount)
	nodeBoxes = strings.TrimSpace(nodeBoxes)

	var testOptions string
	for _, env := range os.Environ() {
		if strings.HasPrefix(env, "E2E_") {
			testOptions += " " + env
		}
	}

	cmd := fmt.Sprintf("NODE_ROLES=\"%s\" NODE_BOXES=\"%s\" %s vagrant up &> vagrant.log", nodeRoles, nodeBoxes, testOptions)
	fmt.Println(cmd)
	if _, err := e2e.RunCommand(cmd); err != nil {
		fmt.Println("Error Creating Cluster", err)
		return nil, nil, nil, err
	}
	return serverNodeNames, linuxAgentNames, windowsAgentNames, nil
}

var (
	kubeConfigFile    string
	serverNodeNames   []string
	linuxAgentNames   []string
	windowsAgentNames []string
)

var _ = ReportAfterEach(e2e.GenReport)
var _ = Describe("Verify Basic Cluster Creation", Ordered, func() {

	It("Starts up with no issues", func() {
		var err error
		serverNodeNames, linuxAgentNames, windowsAgentNames, err = createMixedCluster(*nodeOS, *serverCount, *linuxAgentCount, *windowsAgentCount)
		Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
		fmt.Println("CLUSTER CONFIG")
		fmt.Println("OS:", *nodeOS)
		fmt.Println("Server Nodes:", serverNodeNames)
		fmt.Println("Linux Agent Nodes:", linuxAgentNames)
		fmt.Println("Windows Agent Nodes:", windowsAgentNames)
		kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
		Expect(err).NotTo(HaveOccurred())
	})

	It("Checks Node Status", func() {
		Eventually(func(g Gomega) {
			nodes, err := e2e.ParseNodes(kubeConfigFile, false)
			g.Expect(err).NotTo(HaveOccurred())
			g.Expect(len(serverNodeNames)+len(linuxAgentNames)+len(windowsAgentNames)).Should(Equal(len(nodes)))
			for _, node := range nodes {
				g.Expect(node.Status).Should(Equal("Ready"))
			}
		}, "420s", "5s").Should(Succeed())
		_, err := e2e.ParseNodes(kubeConfigFile, true)
		Expect(err).NotTo(HaveOccurred())
	})

	It("Checks Pod Status", func() {
		Eventually(func(g Gomega) {
			pods, err := e2e.ParsePods(kubeConfigFile, false)
			g.Expect(err).NotTo(HaveOccurred())
			for _, pod := range pods {
				if strings.Contains(pod.Name, "helm-install") {
					g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
				} else {
					g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
				}
			}
		}, "420s", "5s").Should(Succeed())
		_, err := e2e.ParsePods(kubeConfigFile, true)
		Expect(err).NotTo(HaveOccurred())
	})
	It("Verifies internode connectivity over VXLAN", func() {
		_, err := e2e.DeployWorkload("pod_client.yaml", kubeConfigFile)
		Expect(err).NotTo(HaveOccurred())

		_, err = e2e.DeployWorkload("windows_app_deployment.yaml", kubeConfigFile)
		Expect(err).NotTo(HaveOccurred())

		// Wait for the pod_client pods to have an IP
		Eventually(func() string {
			ips, _ := e2e.PodIPsUsingLabel(kubeConfigFile, "app=client")
			return ips[0].Ipv4
		}, "120s", "10s").Should(ContainSubstring("10.42"), "failed getClientIPs")

		// Wait for the windows_app_deployment pods to have an IP (pulling the Windows image can take several minutes, hence the long timeout)
		Eventually(func() string {
			ips, _ := e2e.PodIPsUsingLabel(kubeConfigFile, "app=windows-app")
			return ips[0].Ipv4
		}, "620s", "10s").Should(ContainSubstring("10.42"), "failed to get windows-app pod IPs")

		// Verify there are vxlan routes using vni=4096
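		// flannel's VXLAN backend uses VNI 4096 when Windows nodes are part of the cluster
		// (Windows requires a VNI of at least 4096), hence the flannel.4096 interface checked here.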
		cmdRoute := "ip route"
		for _, node := range append(serverNodeNames, linuxAgentNames...) {
			output, err := e2e.RunCmdOnNode(cmdRoute, node)
			Expect(err).NotTo(HaveOccurred())
			Expect(output).Should(ContainSubstring("flannel.4096"))
		}

		// Test Linux -> Windows communication
		fmt.Println("Testing Linux -> Windows communication")
		cmd := "kubectl exec svc/client-curl --kubeconfig=" + kubeConfigFile + " -- curl -m7 windows-app-svc:3000"
		Eventually(func() (string, error) {
			return e2e.RunCommand(cmd)
		}, "120s", "3s").Should(ContainSubstring("Welcome to PSTools for K8s Debugging"), "failed cmd: "+cmd)

		// Test Windows -> Linux communication
		fmt.Println("Testing Windows -> Linux communication")
		cmd = "kubectl exec svc/windows-app-svc --kubeconfig=" + kubeConfigFile + " -- curl -m7 client-curl:8080"
		Eventually(func() (string, error) {
			return e2e.RunCommand(cmd)
		}, "20s", "3s").Should(ContainSubstring("Welcome to nginx!"), "failed cmd: "+cmd)
	})
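	// sonobuoy is installed on server-0 by ../scripts/install_sonobuoy.sh (see the Vagrantfile above);
	// the mixed-workload-e2e plugin manifest is assumed to be placed there by the same script.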
	It("Runs the mixed os sonobuoy plugin", func() {
		cmd := "sonobuoy run --kubeconfig=/etc/rancher/rke2/rke2.yaml --plugin my-sonobuoy-plugins/mixed-workload-e2e/mixed-workload-e2e.yaml --aggregator-node-selector kubernetes.io/os:linux --wait"
		res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
		Expect(err).NotTo(HaveOccurred(), "failed output:"+res)
		cmd = "sonobuoy retrieve --kubeconfig=/etc/rancher/rke2/rke2.yaml"
		testResultTar, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
		Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd)
		cmd = "sonobuoy results " + testResultTar
		res, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0])
		Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd)
		Expect(res).Should(ContainSubstring("Plugin: mixed-workload-e2e\nStatus: passed\n"))
	})

})

var failed bool
var _ = AfterEach(func() {
	failed = failed || CurrentSpecReport().Failed()
})

var _ = AfterSuite(func() {
	if failed && !*ci {
		fmt.Println("FAILED!")
	} else {
		Expect(e2e.DestroyCluster()).To(Succeed())
		Expect(os.Remove(kubeConfigFile)).To(Succeed())
	}
})
