diff --git a/.drone.yml b/.drone.yml index 2c35e75b34c..5597a96b8a1 100644 --- a/.drone.yml +++ b/.drone.yml @@ -20,9 +20,11 @@ steps: echo "Some files in PR are not ignored, $DIFF"; fi; when: - event: - - push - - pull_request + ref: + include: + - refs/heads/master + - refs/heads/release-* + - refs/pull/** - name: validate-release image: rancher/dapper:v0.5.5 @@ -53,6 +55,13 @@ steps: volumes: - name: docker path: /var/run/docker.sock + when: + ref: + include: + - refs/heads/master + - refs/heads/release-* + - refs/pull/** + - refs/tags/* - name: fossa image: rancher/drone-fossa:latest @@ -98,6 +107,11 @@ steps: when: instance: - drone-publish.rancher.io + ref: + include: + - refs/heads/master + - refs/heads/release-* + - refs/tags/* - name: test image: rancher/dapper:v0.5.5 @@ -113,6 +127,13 @@ steps: volumes: - name: docker path: /var/run/docker.sock + when: + ref: + include: + - refs/heads/master + - refs/heads/release-* + - refs/pull/** + - refs/tags/* - name: publish-image-runtime image: rancher/hardened-build-base:v1.20.4b2 @@ -201,9 +222,11 @@ steps: echo "Some files in PR are not ignored, $DIFF"; fi; when: - event: - - push - - pull_request + ref: + include: + - refs/heads/master + - refs/heads/release-* + - refs/pull/** - name: build image: rancher/dapper:v0.5.8 @@ -212,6 +235,13 @@ steps: volumes: - name: docker path: /var/run/docker.sock + when: + ref: + include: + - refs/heads/master + - refs/heads/release-* + - refs/pull/** + - refs/tags/* - name: package-images image: rancher/dapper:v0.5.8 @@ -295,9 +325,11 @@ steps: echo "Some files in PR are not ignored, $DIFF"; fi; when: - event: - - push - - pull_request + ref: + include: + - refs/heads/master + - refs/heads/release-* + - refs/pull/** - name: dispatch image: rancher/dapper:v0.5.5 @@ -350,9 +382,11 @@ steps: echo "Some files in PR are not ignored, $DIFF"; fi; when: - event: - - push - - pull_request + ref: + include: + - refs/heads/master + - refs/heads/release-* + - refs/pull/** - name: push-runtime-manifest image: plugins/manifest diff --git a/Dockerfile.windows b/Dockerfile.windows index be03aea6be7..70446d72c41 100644 --- a/Dockerfile.windows +++ b/Dockerfile.windows @@ -1,4 +1,4 @@ -FROM alpine:3.17 AS build +FROM alpine:3.18 AS build RUN apk --no-cache add \ curl \ diff --git a/channels.yaml b/channels.yaml index 29c55be1439..8a0799ee636 100644 --- a/channels.yaml +++ b/channels.yaml @@ -1,6 +1,6 @@ channels: - name: stable - latest: v1.25.9+rke2r1 + latest: v1.25.10+rke2r1 - name: latest latestRegexp: .* excludeRegexp: (^[^+]+-|v1\.25\.5\+rke2r1|v1\.26\.0\+rke2r1) diff --git a/install.sh b/install.sh index 2179c0e509c..ca00a78d09a 100755 --- a/install.sh +++ b/install.sh @@ -480,7 +480,17 @@ do_install_rpm() { repodir=/etc/zypp/repos.d fi if [ "${ID_LIKE%%[ ]*}" = "suse" ]; then + # create the /var/lib/rpm-state in SLE systems to fix the prein selinux macro + if [ "${TRANSACTIONAL_UPDATE=false}" != "true" ] && [ -x /usr/sbin/transactional-update ]; then + transactional_update_run="transactional-update --no-selfupdate -d run" + fi + ${transactional_update_run} mkdir -p /var/lib/rpm-state + # configure infix and rpm_installer rpm_site_infix=microos + if [ "${VARIANT_ID:-}" = sle-micro ]; then + rpm_site_infix=slemicro + package_installer=zypper + fi rpm_installer="zypper --gpg-auto-import-keys" if [ "${TRANSACTIONAL_UPDATE=false}" != "true" ] && [ -x /usr/sbin/transactional-update ]; then rpm_installer="transactional-update --no-selfupdate -d run ${rpm_installer}" @@ -488,12 +498,9 @@ do_install_rpm() { else 
maj_ver=$(echo "$VERSION_ID" | sed -E -e "s/^([0-9]+)\.?[0-9]*$/\1/") case ${maj_ver} in - 7|8) + 7|8|9) : ;; - 9) # We are currently using EL8 packages for EL9 as well - maj_ver="8" - ;; *) # In certain cases, like installing on Fedora, maj_ver will end up being something that is not 7 or 8 maj_ver="7" ;; @@ -549,6 +556,16 @@ gpgkey=https://${rpm_site}/public.key EOF fi + if rpm -q --quiet rke2-selinux; then + # remove rke2-selinux module in el9 before upgrade to allow container-selinux to upgrade safely + if check_available_upgrades container-selinux && check_available_upgrades rke2-selinux; then + MODULE_PRIORITY=$(semodule --list=full | grep rke2 | cut -f1 -d" ") + if [ -n "${MODULE_PRIORITY}" ]; then + semodule -X $MODULE_PRIORITY -r rke2 || true + fi + fi + fi + if [ -z "${INSTALL_RKE2_VERSION}" ] && [ -z "${INSTALL_RKE2_COMMIT}" ]; then ${rpm_installer} install -y "rke2-${INSTALL_RKE2_TYPE}" elif [ -n "${INSTALL_RKE2_COMMIT}" ]; then @@ -564,6 +581,21 @@ EOF fi } +check_available_upgrades() { + . /etc/os-release + set +e + if [ "${ID_LIKE%%[ ]*}" = "suse" ]; then + available_upgrades=$(zypper -q -t -s 11 se -s -u --type package $1 | tail -n 1 | grep -v "No matching" | awk '{print $3}') + else + available_upgrades=$(yum -q --refresh list $1 --upgrades | tail -n 1 | awk '{print $2}') + fi + set -e + if [ -n "${available_upgrades}" ]; then + return 0 + fi + return 1 +} + do_install_tar() { setup_tmp diff --git a/pkg/bootstrap/bootstrap.go b/pkg/bootstrap/bootstrap.go index 4c3e1395ef5..6d98b892d8c 100644 --- a/pkg/bootstrap/bootstrap.go +++ b/pkg/bootstrap/bootstrap.go @@ -93,6 +93,7 @@ func Stage(resolver *images.Resolver, nodeConfig *daemonconfig.Node, cfg cmds.Ag refBinDir := binDirForDigest(cfg.DataDir, refDigest) refChartsDir := chartsDirForDigest(cfg.DataDir, refDigest) + manifestsDir := manifestsDir(cfg.DataDir) imagesDir := imagesDir(cfg.DataDir) if dirExists(refBinDir) && dirExists(refChartsDir) { @@ -130,12 +131,18 @@ func Stage(resolver *images.Resolver, nodeConfig *daemonconfig.Node, cfg cmds.Ag } } + // preserve manifests directory mode when extracting, if it already exists + extractOptions := []extract.Option{} + if fi, err := os.Stat(manifestsDir); err == nil { + extractOptions = append(extractOptions, extract.WithMode(fi.Mode())) + } + // Extract binaries and charts extractPaths := map[string]string{ "/bin": refBinDir, "/charts": refChartsDir, } - if err := extract.ExtractDirs(img, extractPaths); err != nil { + if err := extract.ExtractDirs(img, extractPaths, extractOptions...); err != nil { return "", errors.Wrap(err, "failed to extract runtime image") } // Ensure correct permissions on bin dir diff --git a/tests/acceptance/Makefile b/tests/acceptance/Makefile index f4383965f4f..59760859bcf 100644 --- a/tests/acceptance/Makefile +++ b/tests/acceptance/Makefile @@ -3,7 +3,7 @@ include ./config.mk TAGNAME ?= default test-env-up: - @cd ../.. && docker build . -q -f ./tests/acceptance/shared/scripts/Dockerfile.build -t rke2-automated-${TAGNAME} + @cd ../.. && docker build . -q -f ./tests/acceptance/scripts/Dockerfile.build -t rke2-automated-${TAGNAME} # -d .PHONY: test-run diff --git a/tests/acceptance/core/service/assert/host.go b/tests/acceptance/core/service/assert/host.go index d5ac9ccffad..85706a5803c 100644 --- a/tests/acceptance/core/service/assert/host.go +++ b/tests/acceptance/core/service/assert/host.go @@ -4,7 +4,7 @@ import ( "fmt" "strings" - "github.com/rancher/rke2/tests/acceptance/shared/util" + "github.com/rancher/rke2/tests/acceptance/shared" . 
"github.com/onsi/gomega" ) @@ -17,7 +17,7 @@ import ( func CheckComponentCmdHost(cmd string, asserts ...string) { Eventually(func() error { fmt.Println("Executing cmd: ", cmd) - res, err := util.RunCommandHost(cmd) + res, err := shared.RunCommandHost(cmd) if err != nil { return fmt.Errorf("error on RunCommandHost: %v", err) } diff --git a/tests/acceptance/core/service/assert/node.go b/tests/acceptance/core/service/assert/node.go index 03eb46f157b..4ebbbcf3928 100644 --- a/tests/acceptance/core/service/assert/node.go +++ b/tests/acceptance/core/service/assert/node.go @@ -4,49 +4,49 @@ import ( "fmt" "github.com/rancher/rke2/tests/acceptance/core/service/customflag" - "github.com/rancher/rke2/tests/acceptance/shared/util" + "github.com/rancher/rke2/tests/acceptance/shared" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) -type NodeAssertFunc func(g Gomega, node util.Node) +type NodeAssertFunc func(g Gomega, node shared.Node) // NodeAssertVersionTypeUpgraded custom assertion func that asserts that node version is as expected -func NodeAssertVersionTypeUpgraded(installType *customflag.InstallTypeValue) NodeAssertFunc { +func NodeAssertVersionTypeUpgraded(installType *customflag.InstallTypeValueFlag) NodeAssertFunc { if installType.Version != "" { fmt.Printf("Asserting Version: %s\n", installType.Version) - return func(g Gomega, node util.Node) { + return func(g Gomega, node shared.Node) { g.Expect(node.Version).Should(Equal(installType.Version), "Nodes should all be upgraded to the specified version", node.Name) } } if installType.Commit != "" { - version := util.GetRke2Version() + version := shared.GetRke2Version() fmt.Printf("Asserting Commit: %s\n Version: %s", installType.Commit, version) - return func(g Gomega, node util.Node) { + return func(g Gomega, node shared.Node) { g.Expect(version).Should(ContainSubstring(installType.Commit), "Nodes should all be upgraded to the specified commit", node.Name) } } - return func(g Gomega, node util.Node) { + return func(g Gomega, node shared.Node) { GinkgoT().Errorf("no version or commit specified for upgrade assertion") } } // NodeAssertVersionUpgraded custom assertion func that asserts that node version is as expected func NodeAssertVersionUpgraded() NodeAssertFunc { - return func(g Gomega, node util.Node) { - g.Expect(&customflag.UpgradeVersionSUC).Should(ContainSubstring(node.Version), + return func(g Gomega, node shared.Node) { + g.Expect(&customflag.ServiceFlag.UpgradeVersionSUC).Should(ContainSubstring(node.Version), "Nodes should all be upgraded to the specified version", node.Name) } } // NodeAssertReadyStatus custom assertion func that asserts that the node is in Ready state. 
func NodeAssertReadyStatus() NodeAssertFunc { - return func(g Gomega, node util.Node) { + return func(g Gomega, node shared.Node) { g.Expect(node.Status).Should(Equal("Ready"), "Nodes should all be in Ready state") } @@ -57,7 +57,7 @@ func NodeAssertReadyStatus() NodeAssertFunc { func CheckComponentCmdNode(cmd, assert, ip string) { Eventually(func(g Gomega) { fmt.Println("Executing cmd: ", cmd) - res, err := util.RunCommandOnNode(cmd, ip) + res, err := shared.RunCommandOnNode(cmd, ip) if err != nil { return } @@ -66,14 +66,3 @@ func CheckComponentCmdNode(cmd, assert, ip string) { fmt.Println("Result:", res+"Matched with assert:", assert) }, "420s", "3s").Should(Succeed()) } - -// NodeAssertCount custom assertion func that asserts that node count is as expected -func NodeAssertCount() NodeAssertFunc { - return func(g Gomega, node util.Node) { - expectedNodeCount := util.NumServers + util.NumAgents - nodes, err := util.Nodes(false) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(len(nodes)).To(Equal(expectedNodeCount), - "Number of nodes should match the spec") - } -} diff --git a/tests/acceptance/core/service/assert/pod.go b/tests/acceptance/core/service/assert/pod.go index c588725765b..401696cc25d 100644 --- a/tests/acceptance/core/service/assert/pod.go +++ b/tests/acceptance/core/service/assert/pod.go @@ -4,20 +4,20 @@ import ( "fmt" "strings" - "github.com/rancher/rke2/tests/acceptance/shared/util" + "github.com/rancher/rke2/tests/acceptance/shared" . "github.com/onsi/gomega" "github.com/onsi/gomega/types" ) -type PodAssertFunc func(g Gomega, pod util.Pod) +type PodAssertFunc func(g Gomega, pod shared.Pod) var completedAssert = "Completed" // PodAssertRestart custom assertion func that asserts that pods are not restarting with no reason // controller, scheduler, helm-install pods can be restarted occasionally when cluster started if only once func PodAssertRestart() PodAssertFunc { - return func(g Gomega, pod util.Pod) { + return func(g Gomega, pod shared.Pod) { if strings.Contains(pod.NameSpace, "kube-system") && strings.Contains(pod.Name, "controller") && strings.Contains(pod.Name, "scheduler") { @@ -31,7 +31,7 @@ func PodAssertRestart() PodAssertFunc { // PodAssertReady custom assertion func that asserts that the pod is // with correct numbers of ready containers. func PodAssertReady() PodAssertFunc { - return func(g Gomega, pod util.Pod) { + return func(g Gomega, pod shared.Pod) { g.ExpectWithOffset(1, pod.Ready).To(checkReadyFields(), "should have equal values in n/n format") } @@ -55,7 +55,7 @@ func checkReadyFields() types.GomegaMatcher { // PodAssertStatus custom assertion that asserts that pod status is completed or in some cases // apply pods can have an error status func PodAssertStatus() PodAssertFunc { - return func(g Gomega, pod util.Pod) { + return func(g Gomega, pod shared.Pod) { if strings.Contains(pod.Name, "helm-install") { g.Expect(pod.Status).Should(Equal(completedAssert), pod.Name) } else if strings.Contains(pod.Name, "apply") && @@ -65,7 +65,7 @@ func PodAssertStatus() PodAssertFunc { Equal(completedAssert), ), pod.Name) } else { - g.Expect(pod.Status).Should(Equal(util.RunningAssert), pod.Name) + g.Expect(pod.Status).Should(Equal(shared.RunningAssert), pod.Name) } } } @@ -73,9 +73,9 @@ func PodAssertStatus() PodAssertFunc { // CheckPodStatusRunning asserts that the pod is running with the specified label = app name. 
func CheckPodStatusRunning(name, namespace, assert string) { cmd := "kubectl get pods -n " + namespace + " -o=name -l k8s-app=" + name + - " --field-selector=status.phase=Running --kubeconfig=" + util.KubeConfigFile + " --field-selector=status.phase=Running --kubeconfig=" + shared.KubeConfigFile Eventually(func(g Gomega) { - res, err := util.RunCommandHost(cmd) + res, err := shared.RunCommandHost(cmd) if err != nil { return } diff --git a/tests/acceptance/core/service/assert/validate.go b/tests/acceptance/core/service/assert/validate.go index a2680366ee1..6a1edf52bc3 100644 --- a/tests/acceptance/core/service/assert/validate.go +++ b/tests/acceptance/core/service/assert/validate.go @@ -5,7 +5,7 @@ import ( "strings" "time" - "github.com/rancher/rke2/tests/acceptance/shared/util" + "github.com/rancher/rke2/tests/acceptance/shared" ) // validate calls runAssertion for each cmd/assert pair @@ -78,7 +78,7 @@ func runAssertion( // Need to send kubeconfig file. func ValidateOnHost(args ...string) error { exec := func(cmd string) (string, error) { - return util.RunCommandHost(cmd) + return shared.RunCommandHost(cmd) } return validate(exec, args...) } @@ -87,7 +87,7 @@ func ValidateOnHost(args ...string) error { // The last argument should be the assertion. func ValidateOnNode(ip string, args ...string) error { exec := func(cmd string) (string, error) { - return util.RunCommandOnNode(cmd, ip) + return shared.RunCommandOnNode(cmd, ip) } return validate(exec, args...) } diff --git a/tests/acceptance/core/service/customflag/model.go b/tests/acceptance/core/service/customflag/model.go index 7218517f6b4..aab09daed6d 100644 --- a/tests/acceptance/core/service/customflag/model.go +++ b/tests/acceptance/core/service/customflag/model.go @@ -6,20 +6,23 @@ import ( "strings" ) -var ( - UpgradeVersionSUC UpgradeVersion - InstallType InstallTypeValue - InstallUpgradeFlag MultiValueFlag - TestCase TestConfigFlag -) +var ServiceFlag FlagConfig + +type FlagConfig struct { + InstallType InstallTypeValueFlag + InstallUpgrade MultiValueFlag + TestCase TestConfigFlag + ClusterConfig ClusterConfigFlag + UpgradeVersionSUC UpgradeVersionFlag +} -// UpgradeVersion is a custom type to use upgradeVersionSUC flag -type UpgradeVersion struct { +// UpgradeVersionFlag is a custom type to use upgradeVersionSUC flag +type UpgradeVersionFlag struct { Version string } -// InstallTypeValue is a customflag type that can be used to parse the installation type -type InstallTypeValue struct { +// InstallTypeValueFlag is a customflag type that can be used to parse the installation type +type InstallTypeValueFlag struct { Version string Commit string } @@ -37,6 +40,13 @@ type TestCaseFlagType func(deployWorkload bool) // MultiValueFlag is a customflag type that can be used to parse multiple values type MultiValueFlag []string +type DestroyFlag bool + +// ClusterConfigFlag is a customFlag type that can be used to change some cluster config +type ClusterConfigFlag struct { + Destroy DestroyFlag +} + // String returns the string representation of the MultiValueFlag func (m *MultiValueFlag) String() string { return strings.Join(*m, ",") @@ -74,12 +84,12 @@ func (t *TestConfigFlag) Set(value string) error { } // String returns the string representation of the InstallTypeValue -func (i *InstallTypeValue) String() string { +func (i *InstallTypeValueFlag) String() string { return fmt.Sprintf("Version: %s, Commit: %s", i.Version, i.Commit) } // Set parses the input string and sets the Version or Commit field using Set customflag interface -func 
(i *InstallTypeValue) Set(value string) error { +func (i *InstallTypeValueFlag) Set(value string) error { parts := strings.Split(value, "=") if len(parts) == 2 { @@ -99,12 +109,12 @@ func (i *InstallTypeValue) Set(value string) error { } // String returns the string representation of the UpgradeVersion for SUC upgrade -func (t *UpgradeVersion) String() string { +func (t *UpgradeVersionFlag) String() string { return t.Version } // Set parses the input string and sets the Version field for SUC upgrades -func (t *UpgradeVersion) Set(value string) error { +func (t *UpgradeVersionFlag) Set(value string) error { if strings.HasPrefix(value, "v") && strings.HasSuffix(value, "rke2r1") { t.Version = value } else { @@ -113,3 +123,19 @@ func (t *UpgradeVersion) Set(value string) error { return nil } + +// String returns the string representation of the DestroyFlag +func (d *DestroyFlag) String() string { + return fmt.Sprintf("%v", *d) +} + +// Set parses the customFlag value for DestroyFlag +func (d *DestroyFlag) Set(value string) error { + v, err := strconv.ParseBool(value) + if err != nil { + return err + } + *d = DestroyFlag(v) + + return nil +} diff --git a/tests/acceptance/core/service/factory/cluster.go b/tests/acceptance/core/service/factory/cluster.go index 3248eb3bda0..dbb3626462b 100644 --- a/tests/acceptance/core/service/factory/cluster.go +++ b/tests/acceptance/core/service/factory/cluster.go @@ -4,38 +4,52 @@ import ( "fmt" "path/filepath" "strconv" + "sync" "github.com/gruntwork-io/terratest/modules/terraform" - "github.com/rancher/rke2/tests/acceptance/shared/util" + "github.com/rancher/rke2/tests/acceptance/shared" . "github.com/onsi/ginkgo/v2" ) -func BuildCluster(g GinkgoTInterface, destroy bool) (string, error) { - tfDir, err := filepath.Abs(util.BasePath() + "/modules") +type Cluster struct { + Status string + ServerIPs string + AgentIPs string + NumServers int + NumAgents int +} + +var ( + once sync.Once + cluster *Cluster +) + +// NewCluster creates a new cluster and returns his values from terraform config and vars +func NewCluster(g GinkgoTInterface) (*Cluster, error) { + tfDir, err := filepath.Abs(shared.BasePath() + "/acceptance/modules") if err != nil { - return "", err + return nil, err } - varDir, err := filepath.Abs(util.BasePath() + "/modules/config/local.tfvars") + varDir, err := filepath.Abs(shared.BasePath() + "/acceptance/modules/config/local.tfvars") if err != nil { - return "", err + return nil, err } - terraformOptions := terraform.Options{ + terraformOptions := &terraform.Options{ TerraformDir: tfDir, VarFiles: []string{varDir}, } - util.NumServers, err = strconv.Atoi(terraform.GetVariableAsStringFromVarFile(g, varDir, - "no_of_server_nodes")) + NumServers, err := strconv.Atoi(terraform.GetVariableAsStringFromVarFile(g, varDir, "no_of_server_nodes")) if err != nil { - return "", err + return nil, err } - util.NumAgents, err = strconv.Atoi(terraform.GetVariableAsStringFromVarFile(g, varDir, - "no_of_worker_nodes")) + + NumAgents, err := strconv.Atoi(terraform.GetVariableAsStringFromVarFile(g, varDir, "no_of_worker_nodes")) if err != nil { - return "", err + return nil, err } splitRoles := terraform.GetVariableAsStringFromVarFile(g, varDir, "split_roles") @@ -43,46 +57,81 @@ func BuildCluster(g GinkgoTInterface, destroy bool) (string, error) { etcdNodes, err := strconv.Atoi(terraform.GetVariableAsStringFromVarFile(g, varDir, "etcd_only_nodes")) if err != nil { - return "", err + return nil, err } etcdCpNodes, err := 
strconv.Atoi(terraform.GetVariableAsStringFromVarFile(g, varDir, "etcd_cp_nodes")) if err != nil { - return "", err + return nil, err } etcdWorkerNodes, err := strconv.Atoi(terraform.GetVariableAsStringFromVarFile(g, varDir, "etcd_worker_nodes")) if err != nil { - return "", err + return nil, err } cpNodes, err := strconv.Atoi(terraform.GetVariableAsStringFromVarFile(g, varDir, "cp_only_nodes")) if err != nil { - return "", err + return nil, err } cpWorkerNodes, err := strconv.Atoi(terraform.GetVariableAsStringFromVarFile(g, varDir, "cp_worker_nodes")) if err != nil { - return "", err + return nil, err } - util.NumServers = util.NumServers + etcdNodes + etcdCpNodes + etcdWorkerNodes + + NumServers = NumServers + etcdNodes + etcdCpNodes + etcdWorkerNodes + +cpNodes + cpWorkerNodes } - util.AwsUser = terraform.GetVariableAsStringFromVarFile(g, varDir, "aws_user") - util.AccessKey = terraform.GetVariableAsStringFromVarFile(g, varDir, "access_key") - fmt.Printf("\nCreating Cluster") + fmt.Println("Creating Cluster") + + terraform.InitAndApply(g, terraformOptions) + + ServerIPs := terraform.Output(g, terraformOptions, "master_ips") + AgentIPs := terraform.Output(g, terraformOptions, "worker_ips") + + shared.AwsUser = terraform.GetVariableAsStringFromVarFile(g, varDir, "aws_user") + shared.AccessKey = terraform.GetVariableAsStringFromVarFile(g, varDir, "access_key") + shared.KubeConfigFile = terraform.Output(g, terraformOptions, "kubeconfig") + return &Cluster{ + Status: "cluster created", + ServerIPs: ServerIPs, + AgentIPs: AgentIPs, + NumServers: NumServers, + NumAgents: NumAgents, + }, nil +} + +// GetCluster returns a singleton cluster +func GetCluster(g GinkgoTInterface) *Cluster { + var err error - if destroy { - fmt.Printf("Cluster is being deleted") - terraform.Destroy(g, &terraformOptions) - return "cluster destroyed", err + once.Do(func() { + cluster, err = NewCluster(g) + if err != nil { + g.Errorf("error getting cluster: %v", err) + } + }) + return cluster +} + +// DestroyCluster destroys the cluster and returns a message +func DestroyCluster(g GinkgoTInterface) (string, error) { + basepath := shared.BasePath() + tfDir, err := filepath.Abs(basepath + "/modules") + if err != nil { + return "", err + } + varDir, err := filepath.Abs(basepath + "/modules/config/local.tfvars") + if err != nil { + return "", err } - terraform.InitAndApply(g, &terraformOptions) - util.KubeConfigFile = terraform.Output(g, &terraformOptions, "kubeconfig") - util.ServerIPs = terraform.Output(g, &terraformOptions, "master_ips") - util.AgentIPs = terraform.Output(g, &terraformOptions, "worker_ips") + terraformOptions := terraform.Options{ + TerraformDir: tfDir, + VarFiles: []string{varDir}, + } + terraform.Destroy(g, &terraformOptions) - return "cluster created", nil + return "cluster destroyed", nil } diff --git a/tests/acceptance/core/service/template/helper.go b/tests/acceptance/core/service/template/helper.go index eea7f75e99b..50521d76129 100644 --- a/tests/acceptance/core/service/template/helper.go +++ b/tests/acceptance/core/service/template/helper.go @@ -6,7 +6,7 @@ import ( "sync" "github.com/rancher/rke2/tests/acceptance/core/testcase" - "github.com/rancher/rke2/tests/acceptance/shared/util" + "github.com/rancher/rke2/tests/acceptance/shared" . 
"github.com/onsi/ginkgo/v2" ) @@ -77,7 +77,7 @@ func joinCommands(cmd, kubeconfigFlag string) string { // getIPs gets the IPs of the nodes func getIPs() (ips []string, err error) { - ips = util.FetchNodeExternalIP() + ips = shared.FetchNodeExternalIP() return ips, nil } diff --git a/tests/acceptance/core/service/template/model.go b/tests/acceptance/core/service/template/model.go index 76ccefecfd0..6fbd1d8cd09 100644 --- a/tests/acceptance/core/service/template/model.go +++ b/tests/acceptance/core/service/template/model.go @@ -1,5 +1,7 @@ package template +var TestMapFlag TestMap + // VersionTestTemplate represents a version test scenario with test configurations and commands. type VersionTestTemplate struct { Description string @@ -16,9 +18,16 @@ type RunCmd struct { // TestMap represents a single test command with key:value pairs. type TestMap struct { - Cmd string - ExpectedValue string - ExpectedValueUpgrade string + Cmd string + ExpectedValue string + ExpectedValueUpgrade string + ExpectedValueUpgradedHost string + ExpectedValueUpgradedNode string + CmdHost string + ExpectedValueHost string + CmdNode string + ExpectedValueNode string + Description string } // TestConfig represents the testcase function configuration diff --git a/tests/acceptance/core/service/template/processor.go b/tests/acceptance/core/service/template/processor.go index 9b67e0e2811..4265a5c9a56 100644 --- a/tests/acceptance/core/service/template/processor.go +++ b/tests/acceptance/core/service/template/processor.go @@ -5,7 +5,7 @@ import ( "sync" "github.com/rancher/rke2/tests/acceptance/core/service/assert" - "github.com/rancher/rke2/tests/acceptance/shared/util" + "github.com/rancher/rke2/tests/acceptance/shared" . "github.com/onsi/ginkgo/v2" ) @@ -50,7 +50,7 @@ func processOnNode(resultChan chan error, ip, cmd, expectedValue string) { return } - version := util.GetRke2Version() + version := shared.GetRke2Version() fmt.Printf("\n Checking version: %s on ip: %s \n "+ "Command: %s, \n Expected Value: %s", version, ip, cmd, expectedValue) @@ -79,10 +79,10 @@ func processOnHost(resultChan chan error, ip, cmd, expectedValue string) { return } - kubeconfigFlag := " --kubeconfig=" + util.KubeConfigFile + kubeconfigFlag := " --kubeconfig=" + shared.KubeConfigFile fullCmd := joinCommands(cmd, kubeconfigFlag) - version := util.GetRke2Version() + version := shared.GetRke2Version() fmt.Printf("\n Checking version: %s on ip: %s \n "+ "Command: %s, \n Expected Value: %s", version, ip, fullCmd, expectedValue) diff --git a/tests/acceptance/core/testcase/cluster.go b/tests/acceptance/core/testcase/cluster.go index 8d0453926a4..2282840c892 100644 --- a/tests/acceptance/core/testcase/cluster.go +++ b/tests/acceptance/core/testcase/cluster.go @@ -4,7 +4,7 @@ import ( "fmt" "github.com/rancher/rke2/tests/acceptance/core/service/factory" - "github.com/rancher/rke2/tests/acceptance/shared/util" + "github.com/rancher/rke2/tests/acceptance/shared" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -12,22 +12,20 @@ import ( // TestBuildCluster test the creation of a cluster using terraform func TestBuildCluster(g GinkgoTInterface, destroy bool) { - status, err := factory.BuildCluster(g, destroy) - if err != nil { - return - } - Expect(status).To(Equal("cluster created")) + cluster := factory.GetCluster(g) + + Expect(cluster.Status).To(Equal("cluster created")) - util.PrintFileContents(util.KubeConfigFile) - Expect(util.KubeConfigFile).ShouldNot(BeEmpty()) - Expect(util.ServerIPs).ShouldNot(BeEmpty()) + shared.PrintFileContents(shared.KubeConfigFile) + Expect(shared.KubeConfigFile).ShouldNot(BeEmpty()) + Expect(cluster.ServerIPs).ShouldNot(BeEmpty()) - fmt.Println("Server Node IPS:", util.ServerIPs) - fmt.Println("Agent Node IPS:", util.AgentIPs) + fmt.Println("Server Node IPS:", cluster.ServerIPs) + fmt.Println("Agent Node IPS:", cluster.AgentIPs) - if util.NumAgents > 0 { - Expect(util.AgentIPs).ShouldNot(BeEmpty()) + if cluster.NumAgents > 0 { + Expect(cluster.AgentIPs).ShouldNot(BeEmpty()) } else { - Expect(util.AgentIPs).Should(BeEmpty()) + Expect(cluster.AgentIPs).Should(BeEmpty()) } } diff --git a/tests/acceptance/core/testcase/coredns.go b/tests/acceptance/core/testcase/coredns.go index e135122ba5f..dd57cc766b8 100644 --- a/tests/acceptance/core/testcase/coredns.go +++ b/tests/acceptance/core/testcase/coredns.go @@ -2,29 +2,32 @@ package testcase import ( "github.com/rancher/rke2/tests/acceptance/core/service/assert" - "github.com/rancher/rke2/tests/acceptance/shared/util" + "github.com/rancher/rke2/tests/acceptance/shared" . "github.com/onsi/gomega" ) +var ExecDnsUtils = "kubectl exec -n auto-dns -t dnsutils --kubeconfig=" +var Nslookup = "kubernetes.default.svc.cluster.local" + func TestCoredns(deployWorkload bool) { if deployWorkload { - _, err := util.ManageWorkload("create", "dnsutils.yaml") + _, err := shared.ManageWorkload("create", "dnsutils.yaml") Expect(err).NotTo(HaveOccurred(), "dnsutils manifest not deployed", err) } - _, err := util.AddHelmRepo("traefik", "https://helm.traefik.io/traefik") + _, err := shared.AddHelmRepo("traefik", "https://helm.traefik.io/traefik") if err != nil { return } - err = assert.ValidateOnHost(util.ExecDnsUtils+util.KubeConfigFile+ - " -- nslookup kubernetes.default", util.Nslookup) + err = assert.ValidateOnHost(ExecDnsUtils+shared.KubeConfigFile+ + " -- nslookup kubernetes.default", Nslookup) if err != nil { return } - ips := util.FetchNodeExternalIP() + ips := shared.FetchNodeExternalIP() for _, ip := range ips { err = assert.ValidateOnHost( ip, diff --git a/tests/acceptance/core/testcase/daemonset.go b/tests/acceptance/core/testcase/daemonset.go index bf930e06e69..a18641dfa48 100644 --- a/tests/acceptance/core/testcase/daemonset.go +++ b/tests/acceptance/core/testcase/daemonset.go @@ -1,22 +1,22 @@ package testcase import ( - "github.com/rancher/rke2/tests/acceptance/shared/util" + "github.com/rancher/rke2/tests/acceptance/shared" . 
"github.com/onsi/gomega" ) func TestDaemonset(deployWorkload bool) { if deployWorkload { - _, err := util.ManageWorkload("create", "daemonset.yaml") + _, err := shared.ManageWorkload("create", "daemonset.yaml") Expect(err).NotTo(HaveOccurred(), "Daemonset manifest not deployed") } - nodes, _ := util.WorkerNodes(false) - pods, _ := util.Pods(false) + nodes, _ := shared.WorkerNodes(false) + pods, _ := shared.Pods(false) Eventually(func(g Gomega) { - count := util.CountOfStringInSlice("test-daemonset", pods) + count := shared.CountOfStringInSlice("test-daemonset", pods) g.Expect(count).Should(Equal(len(nodes)), "Daemonset pod count does not match node count") }, "420s", "5s").Should(Succeed()) diff --git a/tests/acceptance/core/testcase/ingressdns.go b/tests/acceptance/core/testcase/ingressdns.go index ce5887ebcc1..9f1b12e9f05 100644 --- a/tests/acceptance/core/testcase/ingressdns.go +++ b/tests/acceptance/core/testcase/ingressdns.go @@ -2,30 +2,32 @@ package testcase import ( "github.com/rancher/rke2/tests/acceptance/core/service/assert" - "github.com/rancher/rke2/tests/acceptance/shared/util" + "github.com/rancher/rke2/tests/acceptance/shared" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) +var Running = "Running" + func TestIngress(deployWorkload bool) { var ingressIps []string if deployWorkload { - _, err := util.ManageWorkload("create", "ingress.yaml") + _, err := shared.ManageWorkload("create", "ingress.yaml") Expect(err).NotTo(HaveOccurred(), "Ingress manifest not deployed") } getIngressRunning := "kubectl get pods -n auto-ingress -l k8s-app=nginx-app-ingress --field-selector=status.phase=Running --kubeconfig=" - err := assert.ValidateOnHost(getIngressRunning+util.KubeConfigFile, util.Running) + err := assert.ValidateOnHost(getIngressRunning+shared.KubeConfigFile, Running) if err != nil { GinkgoT().Errorf("Error: %v", err) } - nodes, err := util.Nodes(false) + nodes, err := shared.Nodes(false) Expect(err).NotTo(HaveOccurred()) Eventually(func(Gomega) bool { - ingressIps, err = util.FetchIngressIP("auto-ingress") + ingressIps, err = shared.FetchIngressIP("auto-ingress") if err != nil { return false } @@ -45,19 +47,19 @@ func TestIngress(deployWorkload bool) { func TestDnsAccess(deployWorkload bool) { if deployWorkload { - _, err := util.ManageWorkload("create", "dnsutils.yaml") + _, err := shared.ManageWorkload("create", "dnsutils.yaml") Expect(err).NotTo(HaveOccurred(), "dnsutils manifest not deployed", err) } getDnsUtils := "kubectl get pods -n auto-dns dnsutils --kubeconfig=" - err := assert.ValidateOnHost(getDnsUtils+util.KubeConfigFile, util.Running) + err := assert.ValidateOnHost(getDnsUtils+shared.KubeConfigFile, Running) if err != nil { GinkgoT().Errorf("Error: %v", err) } assert.CheckComponentCmdHost( - util.ExecDnsUtils+util.KubeConfigFile+" -- nslookup kubernetes.default", - util.Nslookup, + ExecDnsUtils+shared.KubeConfigFile+" -- nslookup kubernetes.default", + Nslookup, ) } diff --git a/tests/acceptance/core/testcase/node.go b/tests/acceptance/core/testcase/node.go index 6f0de39fe5a..3d8ff153eb6 100644 --- a/tests/acceptance/core/testcase/node.go +++ b/tests/acceptance/core/testcase/node.go @@ -4,8 +4,10 @@ import ( "fmt" "github.com/rancher/rke2/tests/acceptance/core/service/assert" - "github.com/rancher/rke2/tests/acceptance/shared/util" + "github.com/rancher/rke2/tests/acceptance/core/service/factory" + "github.com/rancher/rke2/tests/acceptance/shared" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) @@ -14,11 +16,12 @@ func TestNodeStatus( nodeAssertReadyStatus assert.NodeAssertFunc, nodeAssertVersion assert.NodeAssertFunc, ) { + cluster := factory.GetCluster(GinkgoT()) fmt.Printf("\nFetching node status\n") - expectedNodeCount := util.NumServers + util.NumAgents + expectedNodeCount := cluster.NumServers + cluster.NumAgents Eventually(func(g Gomega) { - nodes, err := util.Nodes(false) + nodes, err := shared.Nodes(false) g.Expect(err).NotTo(HaveOccurred()) g.Expect(len(nodes)).To(Equal(expectedNodeCount), "Number of nodes should match the spec") diff --git a/tests/acceptance/core/testcase/pod.go b/tests/acceptance/core/testcase/pod.go index dbda0917fbf..6bd4d8c62e5 100644 --- a/tests/acceptance/core/testcase/pod.go +++ b/tests/acceptance/core/testcase/pod.go @@ -5,7 +5,7 @@ import ( "strings" "github.com/rancher/rke2/tests/acceptance/core/service/assert" - "github.com/rancher/rke2/tests/acceptance/shared/util" + "github.com/rancher/rke2/tests/acceptance/shared" . "github.com/onsi/gomega" ) @@ -19,7 +19,7 @@ func TestPodStatus( fmt.Printf("\nFetching pod status\n") Eventually(func(g Gomega) { - pods, err := util.Pods(false) + pods, err := shared.Pods(false) g.Expect(err).NotTo(HaveOccurred()) for _, pod := range pods { @@ -32,7 +32,7 @@ func TestPodStatus( Equal("Completed"), ), pod.Name) } else { - g.Expect(pod.Status).Should(Equal(util.Running), pod.Name) + g.Expect(pod.Status).Should(Equal(Running), pod.Name) if podAssertRestarts != nil { podAssertRestarts(g, pod) } diff --git a/tests/acceptance/core/testcase/service.go b/tests/acceptance/core/testcase/service.go index f487e914521..485721d1962 100644 --- a/tests/acceptance/core/testcase/service.go +++ b/tests/acceptance/core/testcase/service.go @@ -2,7 +2,7 @@ package testcase import ( "github.com/rancher/rke2/tests/acceptance/core/service/assert" - "github.com/rancher/rke2/tests/acceptance/shared/util" + "github.com/rancher/rke2/tests/acceptance/shared" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -10,20 +10,20 @@ import ( func TestServiceClusterIp(deployWorkload bool) { if deployWorkload { - _, err := util.ManageWorkload("create", "clusterip.yaml") + _, err := shared.ManageWorkload("create", "clusterip.yaml") Expect(err).NotTo(HaveOccurred(), "Cluster IP manifest not deployed") } getClusterIp := "kubectl get pods -n auto-clusterip -l k8s-app=nginx-app-clusterip" + " --field-selector=status.phase=Running --kubeconfig=" - err := assert.ValidateOnHost(getClusterIp+util.KubeConfigFile, util.Running) + err := assert.ValidateOnHost(getClusterIp+shared.KubeConfigFile, Running) if err != nil { GinkgoT().Errorf("Error: %v", err) } - clusterip, port, _ := util.FetchClusterIP("auto-clusterip", + clusterip, port, _ := shared.FetchClusterIP("auto-clusterip", "nginx-clusterip-svc") - nodeExternalIP := util.FetchNodeExternalIP() + nodeExternalIP := shared.FetchNodeExternalIP() for _, ip := range nodeExternalIP { err = assert.ValidateOnNode(ip, "curl -sL --insecure http://"+clusterip+ ":"+port+"/name.html", "test-clusterip") @@ -35,14 +35,14 @@ func TestServiceClusterIp(deployWorkload bool) { func TestServiceNodePort(deployWorkload bool) { if deployWorkload { - _, err := util.ManageWorkload("create", "nodeport.yaml") + _, err := shared.ManageWorkload("create", "nodeport.yaml") Expect(err).NotTo(HaveOccurred(), "NodePort manifest not deployed") } - nodeExternalIP := util.FetchNodeExternalIP() + nodeExternalIP := shared.FetchNodeExternalIP() getNodePortSVC := "kubectl get service -n auto-nodeport nginx-nodeport-svc" + " --output jsonpath={.spec.ports[0].nodePort} --kubeconfig=" - nodeport, err := util.RunCommandHost(getNodePortSVC + util.KubeConfigFile) + nodeport, err := shared.RunCommandHost(getNodePortSVC + shared.KubeConfigFile) if err != nil { GinkgoT().Errorf("Error: %v", err) } diff --git a/tests/acceptance/core/testcase/upgradecluster.go b/tests/acceptance/core/testcase/upgradecluster.go index fda1cc533bd..f3a7c99ddec 100644 --- a/tests/acceptance/core/testcase/upgradecluster.go +++ b/tests/acceptance/core/testcase/upgradecluster.go @@ -9,7 +9,8 @@ import ( "github.com/rancher/rke2/tests/acceptance/core/service/assert" "github.com/rancher/rke2/tests/acceptance/core/service/customflag" - "github.com/rancher/rke2/tests/acceptance/shared/util" + "github.com/rancher/rke2/tests/acceptance/core/service/factory" + "github.com/rancher/rke2/tests/acceptance/shared" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -19,20 +20,20 @@ import ( func TestUpgradeClusterSUC(version string) error { fmt.Printf("\nUpgrading cluster to version: %s\n", version) - _, err := util.ManageWorkload("create", "suc.yaml") + _, err := shared.ManageWorkload("create", "suc.yaml") Expect(err).NotTo(HaveOccurred(), "system-upgrade-controller manifest did not deploy successfully") getPodsSystemUpgrade := "kubectl get pods -n system-upgrade --kubeconfig=" assert.CheckComponentCmdHost( - getPodsSystemUpgrade+util.KubeConfigFile, + getPodsSystemUpgrade+shared.KubeConfigFile, "system-upgrade-controller", - util.Running, + Running, ) Expect(err).NotTo(HaveOccurred()) - originalFilePath := util.BasePath() + "/fixtures/workloads" + "/upgrade-plan.yaml" - newFilePath := util.BasePath() + "/fixtures/workloads" + "/plan.yaml" + originalFilePath := shared.BasePath() + "/fixtures/workloads" + "/upgrade-plan.yaml" + newFilePath := shared.BasePath() + "/fixtures/workloads" + "/plan.yaml" content, err := os.ReadFile(originalFilePath) if err != nil { @@ -45,7 +46,7 @@ func TestUpgradeClusterSUC(version string) error { return fmt.Errorf("failed to write file: %s", err) } - _, err = util.ManageWorkload("create", "plan.yaml") + _, err = shared.ManageWorkload("create", "plan.yaml") Expect(err).NotTo(HaveOccurred(), "failed to upgrade cluster.") return nil @@ -56,21 +57,22 @@ func TestUpgradeClusterManually(version string) error { if version == "" { return fmt.Errorf("please provide a non-empty rke2 version to upgrade to") } + cluster := factory.GetCluster(GinkgoT()) - serverIPs := strings.Split(util.ServerIPs, ",") - agentIPs := strings.Split(util.AgentIPs, ",") + serverIPs := strings.Split(cluster.ServerIPs, ",") + agentIPs := strings.Split(cluster.AgentIPs, ",") - if util.NumServers == 0 && util.NumAgents == 0 { + if cluster.NumServers == 0 && cluster.NumAgents == 0 { return fmt.Errorf("no nodes found to upgrade") } - if util.NumServers > 0 { + if cluster.NumServers > 0 { if err := upgradeServer(version, serverIPs); err != nil { return err } } - if util.NumAgents > 0 { + if cluster.NumAgents > 0 { if err := upgradeAgent(version, agentIPs); err != nil { return err } @@ -86,10 +88,10 @@ func upgradeServer(installType string, serverIPs []string) error { for _, ip := range serverIPs { switch { - case customflag.InstallType.Version != "": - installType = fmt.Sprintf("INSTALL_RKE2_VERSION=%s", customflag.InstallType.Version) - case customflag.InstallType.Commit != "": - installType = fmt.Sprintf("INSTALL_RKE2_COMMIT=%s", customflag.InstallType.Commit) + case customflag.ServiceFlag.InstallType.Version != "": + installType = fmt.Sprintf("INSTALL_RKE2_VERSION=%s", customflag.ServiceFlag.InstallType.Version) + case customflag.ServiceFlag.InstallType.Commit != "": + installType = fmt.Sprintf("INSTALL_RKE2_COMMIT=%s", customflag.ServiceFlag.InstallType.Commit) } installRke2Server := "sudo curl -sfL https://get.rke2.io | sudo %s INSTALL_RKE2_TYPE=server sh - " @@ -100,7 +102,7 @@ func upgradeServer(installType string, serverIPs []string) error { defer GinkgoRecover() fmt.Println("Upgrading server to: " + upgradeCommand) - if _, err := util.RunCommandOnNode(upgradeCommand, ip); err != nil { + if _, err := shared.RunCommandOnNode(upgradeCommand, ip); err != nil { fmt.Printf("\nError upgrading server %s: %v\n\n", ip, err) errCh <- err close(errCh) @@ -108,7 +110,7 @@ func upgradeServer(installType string, serverIPs []string) error { } fmt.Println("Restarting server: " + ip) - if _, err := util.RestartCluster(ip); err != nil { + if 
_, err := shared.RestartCluster(ip); err != nil { fmt.Printf("\nError restarting server %s: %v\n\n", ip, err) errCh <- err close(errCh) @@ -130,10 +132,10 @@ func upgradeAgent(installType string, agentIPs []string) error { for _, ip := range agentIPs { switch { - case customflag.InstallType.Version != "": - installType = fmt.Sprintf("INSTALL_RKE2_VERSION=%s", customflag.InstallType.Version) - case customflag.InstallType.Commit != "": - installType = fmt.Sprintf("INSTALL_RKE2_COMMIT=%s", customflag.InstallType.Commit) + case customflag.ServiceFlag.InstallType.Version != "": + installType = fmt.Sprintf("INSTALL_RKE2_VERSION=%s", customflag.ServiceFlag.InstallType.Version) + case customflag.ServiceFlag.InstallType.Commit != "": + installType = fmt.Sprintf("INSTALL_RKE2_COMMIT=%s", customflag.ServiceFlag.InstallType.Commit) } installRke2Agent := "sudo curl -sfL https://get.rke2.io | sudo %s INSTALL_RKE2_TYPE=agent sh - " @@ -144,7 +146,7 @@ func upgradeAgent(installType string, agentIPs []string) error { defer GinkgoRecover() fmt.Println("Upgrading agent to: " + upgradeCommand) - if _, err := util.RunCommandOnNode(upgradeCommand, ip); err != nil { + if _, err := shared.RunCommandOnNode(upgradeCommand, ip); err != nil { fmt.Printf("\nError upgrading agent %s: %v\n\n", ip, err) errCh <- err close(errCh) @@ -152,7 +154,7 @@ func upgradeAgent(installType string, agentIPs []string) error { } fmt.Println("Restarting agent: " + ip) - if _, err := util.RestartCluster(ip); err != nil { + if _, err := shared.RestartCluster(ip); err != nil { fmt.Printf("\nError restarting agent %s: %v\n\n", ip, err) errCh <- err close(errCh) diff --git a/tests/acceptance/entrypoint/createcluster/createcluster_suite_test.go b/tests/acceptance/entrypoint/createcluster/createcluster_suite_test.go index 263f5a8ec65..7c6ecfcfe8e 100644 --- a/tests/acceptance/entrypoint/createcluster/createcluster_suite_test.go +++ b/tests/acceptance/entrypoint/createcluster/createcluster_suite_test.go @@ -2,26 +2,32 @@ package createcluster import ( "flag" + "os" "testing" + "github.com/rancher/rke2/tests/acceptance/core/service/customflag" "github.com/rancher/rke2/tests/acceptance/core/service/factory" - "github.com/rancher/rke2/tests/acceptance/shared/util" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) +func TestMain(m *testing.M) { + flag.Var(&customflag.ServiceFlag.ClusterConfig.Destroy, "destroy", "Destroy cluster after test") + flag.Parse() + os.Exit(m.Run()) +} + func TestClusterCreateSuite(t *testing.T) { RegisterFailHandler(Fail) - flag.Parse() RunSpecs(t, "Create Cluster Test Suite") } var _ = AfterSuite(func() { g := GinkgoT() - if *util.Destroy { - status, err := factory.BuildCluster(g, *util.Destroy) + if customflag.ServiceFlag.ClusterConfig.Destroy { + status, err := factory.DestroyCluster(g) Expect(err).NotTo(HaveOccurred()) Expect(status).To(Equal("cluster destroyed")) } diff --git a/tests/acceptance/entrypoint/createcluster/createcluster_test.go b/tests/acceptance/entrypoint/createcluster/createcluster_test.go index 2137038f944..def9fcf8069 100644 --- a/tests/acceptance/entrypoint/createcluster/createcluster_test.go +++ b/tests/acceptance/entrypoint/createcluster/createcluster_test.go @@ -5,7 +5,7 @@ import ( "github.com/rancher/rke2/tests/acceptance/core/service/assert" "github.com/rancher/rke2/tests/acceptance/core/testcase" - "github.com/rancher/rke2/tests/acceptance/shared/util" + "github.com/rancher/rke2/tests/acceptance/shared" . 
"github.com/onsi/ginkgo/v2" ) @@ -35,37 +35,31 @@ var _ = Describe("Test:", func() { It("Verifies ClusterIP Service", func() { testcase.TestServiceClusterIp(true) - defer util.ManageWorkload("delete", "clusterip.yaml") + defer shared.ManageWorkload("delete", "clusterip.yaml") }) It("Verifies NodePort Service", func() { testcase.TestServiceNodePort(true) - defer util.ManageWorkload("delete", "nodeport.yaml") + defer shared.ManageWorkload("delete", "nodeport.yaml") }) It("Verifies Ingress", func() { testcase.TestIngress(true) - defer util.ManageWorkload("delete", "ingress.yaml") + defer shared.ManageWorkload("delete", "ingress.yaml") }) It("Verifies Daemonset", func() { testcase.TestDaemonset(true) - defer util.ManageWorkload("delete", "daemonset.yaml") + defer shared.ManageWorkload("delete", "daemonset.yaml") }) It("Verifies dns access", func() { testcase.TestDnsAccess(true) - defer util.ManageWorkload("delete", "dnsutils.yaml") + defer shared.ManageWorkload("delete", "dnsutils.yaml") }) }) }) -var _ = BeforeEach(func() { - if *util.Destroy { - Skip("Cluster is being Deleted") - } -}) - var _ = AfterEach(func() { if CurrentSpecReport().Failed() { fmt.Printf("\nFAILED! %s\n", CurrentSpecReport().FullText()) diff --git a/tests/acceptance/entrypoint/upgradecluster/upgradecluster_suite_test.go b/tests/acceptance/entrypoint/upgradecluster/upgradecluster_suite_test.go index eae7608289d..2eca142465d 100644 --- a/tests/acceptance/entrypoint/upgradecluster/upgradecluster_suite_test.go +++ b/tests/acceptance/entrypoint/upgradecluster/upgradecluster_suite_test.go @@ -7,19 +7,18 @@ import ( "github.com/rancher/rke2/tests/acceptance/core/service/customflag" "github.com/rancher/rke2/tests/acceptance/core/service/factory" - "github.com/rancher/rke2/tests/acceptance/shared/util" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) func TestMain(m *testing.M) { - flag.Var(&customflag.InstallType, "installtype", "Upgrade to run with type=value,"+ + flag.Var(&customflag.ServiceFlag.ClusterConfig.Destroy, "destroy", "Destroy cluster after test") + flag.Var(&customflag.ServiceFlag.InstallType, "installtype", "Upgrade to run with type=value,"+ "INSTALL_RKE2_VERSION=v1.26.2+rke2r1 or INSTALL_RKE2_COMMIT=1823dsad7129873192873129asd") - flag.Var(&customflag.UpgradeVersionSUC, "upgradeVersionSUC", "Upgrade SUC model") + flag.Var(&customflag.ServiceFlag.UpgradeVersionSUC, "upgradeVersionSUC", "Upgrade SUC model") flag.Parse() - os.Exit(m.Run()) } @@ -31,8 +30,8 @@ func TestClusterUpgradeSuite(t *testing.T) { var _ = AfterSuite(func() { g := GinkgoT() - if *util.Destroy { - status, err := factory.BuildCluster(g, *util.Destroy) + if customflag.ServiceFlag.ClusterConfig.Destroy { + status, err := factory.DestroyCluster(g) Expect(err).NotTo(HaveOccurred()) Expect(status).To(Equal("cluster destroyed")) } diff --git a/tests/acceptance/entrypoint/upgradecluster/upgrademanual_test.go b/tests/acceptance/entrypoint/upgradecluster/upgrademanual_test.go index af03992e83d..83974be433b 100644 --- a/tests/acceptance/entrypoint/upgradecluster/upgrademanual_test.go +++ b/tests/acceptance/entrypoint/upgradecluster/upgrademanual_test.go @@ -8,7 +8,7 @@ import ( "github.com/rancher/rke2/tests/acceptance/core/service/assert" "github.com/rancher/rke2/tests/acceptance/core/service/customflag" "github.com/rancher/rke2/tests/acceptance/core/testcase" - "github.com/rancher/rke2/tests/acceptance/shared/util" + "github.com/rancher/rke2/tests/acceptance/shared" . 
"github.com/onsi/ginkgo/v2" ) @@ -58,13 +58,13 @@ var _ = Describe("Test:", func() { }) It("Upgrade manual", func() { - _ = testcase.TestUpgradeClusterManually(customflag.InstallType.String()) + _ = testcase.TestUpgradeClusterManually(customflag.ServiceFlag.InstallType.String()) }) It("Checks Node Status pos upgrade", func() { testcase.TestNodeStatus( assert.NodeAssertReadyStatus(), - assert.NodeAssertVersionTypeUpgraded(&customflag.InstallType), + assert.NodeAssertVersionTypeUpgraded(&customflag.ServiceFlag.InstallType), ) }) @@ -78,36 +78,30 @@ var _ = Describe("Test:", func() { It("Verifies ClusterIP Service Post upgrade", func() { testcase.TestServiceClusterIp(false) - defer util.ManageWorkload("delete", "clusterip.yaml") + defer shared.ManageWorkload("delete", "clusterip.yaml") }) It("Verifies NodePort Service Post upgrade", func() { testcase.TestServiceNodePort(false) - defer util.ManageWorkload("delete", "nodeport.yaml") + defer shared.ManageWorkload("delete", "nodeport.yaml") }) It("Verifies Ingress Post upgrade", func() { testcase.TestIngress(false) - defer util.ManageWorkload("delete", "ingress.yaml") + defer shared.ManageWorkload("delete", "ingress.yaml") }) It("Verifies Daemonset Post upgrade", func() { testcase.TestDaemonset(false) - defer util.ManageWorkload("delete", "daemonset.yaml") + defer shared.ManageWorkload("delete", "daemonset.yaml") }) It("Verifies DNS Access Post upgrade", func() { testcase.TestDnsAccess(true) - defer util.ManageWorkload("delete", "dns.yaml") + defer shared.ManageWorkload("delete", "dns.yaml") }) }) -var _ = BeforeEach(func() { - if *util.Destroy { - Skip("Cluster is being Deleted") - } -}) - var _ = AfterEach(func() { if CurrentSpecReport().Failed() { fmt.Printf("\nFAILED! %s\n", CurrentSpecReport().FullText()) diff --git a/tests/acceptance/entrypoint/upgradecluster/upgradesuc_test.go b/tests/acceptance/entrypoint/upgradecluster/upgradesuc_test.go index 9fae1513390..b10c957dc01 100644 --- a/tests/acceptance/entrypoint/upgradecluster/upgradesuc_test.go +++ b/tests/acceptance/entrypoint/upgradecluster/upgradesuc_test.go @@ -8,7 +8,7 @@ import ( "github.com/rancher/rke2/tests/acceptance/core/service/assert" "github.com/rancher/rke2/tests/acceptance/core/service/customflag" "github.com/rancher/rke2/tests/acceptance/core/testcase" - "github.com/rancher/rke2/tests/acceptance/shared/util" + "github.com/rancher/rke2/tests/acceptance/shared" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -56,7 +56,7 @@ var _ = Describe("SUC Upgrade Tests:", func() { }) It("\nUpgrade via SUC", func() { - err := testcase.TestUpgradeClusterSUC(customflag.UpgradeVersionSUC.String()) + err := testcase.TestUpgradeClusterSUC(customflag.ServiceFlag.UpgradeVersionSUC.String()) Expect(err).NotTo(HaveOccurred()) }) @@ -77,27 +77,27 @@ var _ = Describe("SUC Upgrade Tests:", func() { It("Verifies ClusterIP Service pos upgrade", func() { testcase.TestServiceClusterIp(false) - defer util.ManageWorkload("delete", "clusterip.yaml") + defer shared.ManageWorkload("delete", "clusterip.yaml") }) It("Verifies NodePort Service pos upgrade", func() { testcase.TestServiceNodePort(false) - defer util.ManageWorkload("delete", "nodeport.yaml") + defer shared.ManageWorkload("delete", "nodeport.yaml") }) It("Verifies Ingress pos upgrade", func() { testcase.TestIngress(false) - defer util.ManageWorkload("delete", "ingress.yaml") + defer shared.ManageWorkload("delete", "ingress.yaml") }) It("Verifies Daemonset pos upgrade", func() { testcase.TestDaemonset(false) - defer util.ManageWorkload("delete", "daemonset.yaml") + defer shared.ManageWorkload("delete", "daemonset.yaml") }) It("Verifies DNS Access pos upgrade", func() { testcase.TestDnsAccess(true) - defer util.ManageWorkload("delete", "dns.yaml") + defer shared.ManageWorkload("delete", "dns.yaml") }) }) diff --git a/tests/acceptance/entrypoint/versionbump/constants.go b/tests/acceptance/entrypoint/versionbump/constants.go deleted file mode 100644 index c03f2537f00..00000000000 --- a/tests/acceptance/entrypoint/versionbump/constants.go +++ /dev/null @@ -1,12 +0,0 @@ -package versionbump - -var ( - ExpectedValueUpgradedHost string - ExpectedValueUpgradedNode string - CmdHost string - ExpectedValueHost string - CmdNode string - ExpectedValueNode string - Description string - GetRuncVersion = "(find /var/lib/rancher/rke2/data/ -type f -name runc -exec {} --version \\;)" -) diff --git a/tests/acceptance/entrypoint/versionbump/version_suite_test.go b/tests/acceptance/entrypoint/versionbump/version_suite_test.go index 5c7e4b24d6c..f993f16ba31 100644 --- a/tests/acceptance/entrypoint/versionbump/version_suite_test.go +++ b/tests/acceptance/entrypoint/versionbump/version_suite_test.go @@ -9,34 +9,34 @@ import ( "github.com/rancher/rke2/tests/acceptance/core/service/customflag" "github.com/rancher/rke2/tests/acceptance/core/service/factory" "github.com/rancher/rke2/tests/acceptance/core/service/template" - "github.com/rancher/rke2/tests/acceptance/shared/util" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) func TestMain(m *testing.M) { - flag.StringVar(&CmdHost, "cmdHost", "", "Comma separated list of commands to execute on host") - flag.StringVar(&ExpectedValueHost, "expectedValueHost", "", "Comma separated list of expected values for host commands") - flag.StringVar(&CmdNode, "cmdNode", "", "Comma separated list of commands to execute on node") - flag.StringVar(&ExpectedValueNode, "expectedValueNode", "", "Comma separated list of expected values for node commands") - flag.StringVar(&ExpectedValueUpgradedHost, "expectedValueUpgradedHost", "", "Expected value of the command ran on Host after upgrading") - flag.StringVar(&ExpectedValueUpgradedNode, "expectedValueUpgradedNode", "", "Expected value of the command ran on Node after upgrading") - flag.Var(&customflag.InstallUpgradeFlag, "installUpgradeFlag", "Install upgrade customflag") - flag.StringVar(&Description, "description", "", "Description of the test") - flag.Var(&customflag.TestCase, "testCase", "Test case to run") - flag.BoolVar(&customflag.TestCase.DeployWorkload, "deployWorkload", false, "Deploy workload customflag") + flag.Var(&customflag.ServiceFlag.ClusterConfig.Destroy, "destroy", "Destroy cluster after test") + flag.StringVar(&template.TestMapFlag.CmdHost, "cmdHost", "", "Comma separated list of commands to execute on host") + flag.StringVar(&template.TestMapFlag.ExpectedValueHost, "expectedValueHost", "", "Comma separated list of expected values for host commands") + flag.StringVar(&template.TestMapFlag.CmdNode, "cmdNode", "", "Comma separated list of commands to execute on node") + flag.StringVar(&template.TestMapFlag.ExpectedValueNode, "expectedValueNode", "", "Comma separated list of expected values for node commands") + flag.StringVar(&template.TestMapFlag.ExpectedValueUpgradedHost, "expectedValueUpgradedHost", "", "Expected value of the command ran on Host after upgrading") + flag.StringVar(&template.TestMapFlag.ExpectedValueUpgradedNode, "expectedValueUpgradedNode", "", "Expected value of the command ran on Node after upgrading") + flag.Var(&customflag.ServiceFlag.InstallUpgrade, "installUpgradeFlag", "Install upgrade customflag") + flag.StringVar(&template.TestMapFlag.Description, "description", "", "Description of the test") + flag.Var(&customflag.ServiceFlag.TestCase, "testCase", "Test case to run") + flag.BoolVar(&customflag.ServiceFlag.TestCase.DeployWorkload, "deployWorkload", false, "Deploy workload customflag") flag.Parse() - testFunc, err := template.GetTestCase(customflag.TestCase.TestFuncName) + testFunc, err := template.GetTestCase(customflag.ServiceFlag.TestCase.TestFuncName) if err != nil { fmt.Printf("Error: %v\n", err) return } if testFunc != nil { - customflag.TestCase.TestFunc = customflag.TestCaseFlagType(testFunc) + customflag.ServiceFlag.TestCase.TestFunc = customflag.TestCaseFlagType(testFunc) } os.Exit(m.Run()) @@ -49,8 +49,8 @@ func TestVersionTestSuite(t *testing.T) { var _ = AfterSuite(func() { g := GinkgoT() - if *util.Destroy { - status, err := factory.BuildCluster(g, *util.Destroy) + if customflag.ServiceFlag.ClusterConfig.Destroy { + status, err := factory.DestroyCluster(g) Expect(err).NotTo(HaveOccurred()) Expect(status).To(Equal("cluster destroyed")) } diff --git a/tests/acceptance/entrypoint/versionbump/versionbump_test.go b/tests/acceptance/entrypoint/versionbump/versionbump_test.go index 28fdfdbfdc9..5a475d48935 100644 --- a/tests/acceptance/entrypoint/versionbump/versionbump_test.go +++ b/tests/acceptance/entrypoint/versionbump/versionbump_test.go @@ -9,7 
@@ -9,7 +9,6 @@ import (
 	"github.com/rancher/rke2/tests/acceptance/core/service/customflag"
 	"github.com/rancher/rke2/tests/acceptance/core/service/template"
 	"github.com/rancher/rke2/tests/acceptance/core/testcase"
-	"github.com/rancher/rke2/tests/acceptance/shared/util"
 
 	. "github.com/onsi/ginkgo/v2"
 )
@@ -37,38 +36,32 @@ var _ = Describe("VersionTemplate Upgrade:", func() {
 	It("Test Bump version", func() {
 		template.VersionTemplate(template.VersionTestTemplate{
-			Description: Description,
+			Description: template.TestMapFlag.Description,
 			TestCombination: &template.RunCmd{
 				RunOnHost: []template.TestMap{
 					{
-						Cmd: CmdHost,
-						ExpectedValue: ExpectedValueHost,
-						ExpectedValueUpgrade: ExpectedValueUpgradedHost,
+						Cmd: template.TestMapFlag.CmdHost,
+						ExpectedValue: template.TestMapFlag.ExpectedValueHost,
+						ExpectedValueUpgrade: template.TestMapFlag.ExpectedValueUpgradedHost,
 					},
 				},
 				RunOnNode: []template.TestMap{
 					{
-						Cmd: CmdNode,
-						ExpectedValue: ExpectedValueNode,
-						ExpectedValueUpgrade: ExpectedValueUpgradedNode,
+						Cmd: template.TestMapFlag.CmdNode,
+						ExpectedValue: template.TestMapFlag.ExpectedValueNode,
+						ExpectedValueUpgrade: template.TestMapFlag.ExpectedValueUpgradedNode,
 					},
 				},
 			},
-			InstallUpgrade: customflag.InstallUpgradeFlag,
+			InstallUpgrade: customflag.ServiceFlag.InstallUpgrade,
 			TestConfig: &template.TestConfig{
-				TestFunc: template.TestCase(customflag.TestCase.TestFunc),
-				DeployWorkload: customflag.TestCase.DeployWorkload,
+				TestFunc: template.TestCase(customflag.ServiceFlag.TestCase.TestFunc),
+				DeployWorkload: customflag.ServiceFlag.TestCase.DeployWorkload,
 			},
 		})
 	})
 })
 
-var _ = BeforeEach(func() {
-	if *util.Destroy {
-		Skip("Cluster is being Deleted")
-	}
-})
-
 var _ = AfterEach(func() {
 	if CurrentSpecReport().Failed() {
 		fmt.Printf("\nFAILED! %s\n", CurrentSpecReport().FullText())
diff --git a/tests/acceptance/entrypoint/versionbump/versioncoredns_test.go b/tests/acceptance/entrypoint/versionbump/versioncoredns_test.go
index f23c24514f6..dc6008c60a0 100644
--- a/tests/acceptance/entrypoint/versionbump/versioncoredns_test.go
+++ b/tests/acceptance/entrypoint/versionbump/versioncoredns_test.go
@@ -9,7 +9,6 @@ import (
 	"github.com/rancher/rke2/tests/acceptance/core/service/customflag"
 	"github.com/rancher/rke2/tests/acceptance/core/service/template"
 	"github.com/rancher/rke2/tests/acceptance/core/testcase"
-	"github.com/rancher/rke2/tests/acceptance/shared/util"
 
 	. "github.com/onsi/ginkgo/v2"
 )
@@ -41,13 +40,14 @@ var _ = Describe("VersionTemplate Upgrade:", func() {
 			TestCombination: &template.RunCmd{
 				RunOnHost: []template.TestMap{
 					{
-						Cmd: util.GetCoreDNSdeployImage,
-						ExpectedValue: ExpectedValueHost,
-						ExpectedValueUpgrade: ExpectedValueUpgradedHost,
+						Cmd: "kubectl get deploy rke2-coredns-rke2-coredns -n kube-system -o " +
+							"jsonpath='{.spec.template.spec.containers[?(@.name==\"coredns\")].image}'",
+						ExpectedValue: template.TestMapFlag.ExpectedValueHost,
+						ExpectedValueUpgrade: template.TestMapFlag.ExpectedValueUpgradedHost,
 					},
 				},
 			},
-			InstallUpgrade: customflag.InstallUpgradeFlag,
+			InstallUpgrade: customflag.ServiceFlag.InstallUpgrade,
 			TestConfig: &template.TestConfig{
 				TestFunc: testcase.TestCoredns,
 				DeployWorkload: true,
@@ -56,12 +56,6 @@ var _ = Describe("VersionTemplate Upgrade:", func() {
 	})
 })
 
-var _ = BeforeEach(func() {
-	if *util.Destroy {
-		Skip("Cluster is being Deleted")
-	}
-})
-
 var _ = AfterEach(func() {
 	if CurrentSpecReport().Failed() {
 		fmt.Printf("\nFAILED! %s\n", CurrentSpecReport().FullText())
diff --git a/tests/acceptance/entrypoint/versionbump/versionrunc_test.go b/tests/acceptance/entrypoint/versionbump/versionrunc_test.go
index eacabccb444..6e575b7dae1 100644
--- a/tests/acceptance/entrypoint/versionbump/versionrunc_test.go
+++ b/tests/acceptance/entrypoint/versionbump/versionrunc_test.go
@@ -5,11 +5,12 @@ package versionbump
 import (
 	"fmt"
 
-	. "github.com/onsi/ginkgo/v2"
 	"github.com/rancher/rke2/tests/acceptance/core/service/assert"
 	"github.com/rancher/rke2/tests/acceptance/core/service/customflag"
 	"github.com/rancher/rke2/tests/acceptance/core/service/template"
 	"github.com/rancher/rke2/tests/acceptance/core/testcase"
+
+	. "github.com/onsi/ginkgo/v2"
 )
 
 var _ = Describe("VersionTemplate Upgrade:", func() {
@@ -39,13 +40,13 @@ var _ = Describe("VersionTemplate Upgrade:", func() {
 			TestCombination: &template.RunCmd{
 				RunOnNode: []template.TestMap{
 					{
-						Cmd: GetRuncVersion,
-						ExpectedValue: ExpectedValueNode,
-						ExpectedValueUpgrade: ExpectedValueUpgradedNode,
+						Cmd: "(find /var/lib/rancher/rke2/data/ -type f -name runc -exec {} --version \\;)",
+						ExpectedValue: template.TestMapFlag.ExpectedValueNode,
+						ExpectedValueUpgrade: template.TestMapFlag.ExpectedValueUpgradedNode,
 					},
 				},
 			},
-			InstallUpgrade: customflag.InstallUpgradeFlag,
+			InstallUpgrade: customflag.ServiceFlag.InstallUpgrade,
 			TestConfig: nil,
 		})
 	})
diff --git a/tests/acceptance/fixtures/workloads/upgrade-plan.yaml b/tests/acceptance/fixtures/workloads/upgrade-plan.yaml
deleted file mode 100644
index 5c576f10e9c..00000000000
--- a/tests/acceptance/fixtures/workloads/upgrade-plan.yaml
+++ /dev/null
@@ -1,74 +0,0 @@
-apiVersion: upgrade.cattle.io/v1
-kind: Plan
-metadata:
-  name: rke2-server-cp
-  namespace: system-upgrade
-  labels:
-    rke2-upgrade: server
-spec:
-  concurrency: 1
-  version: $UPGRADEVERSION
-  nodeSelector:
-    matchExpressions:
-      - {key: node-role.kubernetes.io/control-plane, operator: In, values: ["true"]}
-  tolerations:
-    - operator: Exists
-  serviceAccountName: system-upgrade
-  cordon: true
-  upgrade:
-    image: rancher/rke2-upgrade
----
-apiVersion: upgrade.cattle.io/v1
-kind: Plan
-metadata:
-  name: rke2-server-etcd
-  namespace: system-upgrade
-  labels:
-    rke2-upgrade: server
-spec:
-  concurrency: 1
-  version: $UPGRADEVERSION
-  nodeSelector:
-    matchExpressions:
-      - key: "rke.cattle.io/etcd-role"
-        operator: Exists
-      - key: node-role.kubernetes.io/etcd
-        operator: In
-        values: [ "true" ]
-      - key: node-role.kubernetes.io/control-plane
-        operator: NotIn
-        values: [ "true" ]
-  tolerations:
-    - operator: Exists
-  serviceAccountName: system-upgrade
-  prepare:
-    image: rancher/rke2-upgrade
-    args: ["prepare", "rke2-server-cp"]
-  cordon: true
-  drain:
-    force: true
-  upgrade:
-    image: rancher/rke2-upgrade
----
-apiVersion: upgrade.cattle.io/v1
-kind: Plan
-metadata:
-  name: rke2-agent
-  namespace: system-upgrade
-  labels:
-    rke2-upgrade: agent
-spec:
-  concurrency: 2
-  version: $UPGRADEVERSION
-  nodeSelector:
-    matchExpressions:
-      - {key: node-role.kubernetes.io/etcd, operator: NotIn, values: ["true"]}
-      - {key: node-role.kubernetes.io/control-plane, operator: NotIn, values: ["true"]}
-  serviceAccountName: system-upgrade
-  prepare:
-    image: rancher/rke2-upgrade
-    args: ["prepare", "rke2-server-etcd"]
-  drain:
-    force: true
-  upgrade:
-    image: rancher/rke2-upgrade
\ No newline at end of file
diff --git a/tests/acceptance/shared/scripts/Dockerfile.build b/tests/acceptance/scripts/Dockerfile.build
similarity index 100%
rename from tests/acceptance/shared/scripts/Dockerfile.build
rename to tests/acceptance/scripts/Dockerfile.build
diff --git a/tests/acceptance/shared/scripts/Jenkinsfile b/tests/acceptance/scripts/Jenkinsfile
similarity index 96%
rename from tests/acceptance/shared/scripts/Jenkinsfile
rename to tests/acceptance/scripts/Jenkinsfile
index f8d05e324b8..25ff90fbe0f 100644
--- a/tests/acceptance/shared/scripts/Jenkinsfile
+++ b/tests/acceptance/scripts/Jenkinsfile
@@ -75,7 +75,7 @@ node {
             stage('Run TestCombination') {
                 sh "docker run --name ${testContainer} -t --env-file ${envFile} " +
                     "${imageName} sh -c \"chmod 400 /go/src/github.com/rancher/rke2/tests/acceptance/modules/config/.ssh/aws_key.pem && " +
-                    "cd ./tests/acceptance/entrypoint && go test -timeout=${timeout} -v ./${RKE2_TESTCASE}/... ${RKE2_TEST_ARGS}\""
+                    "cd ./tests/acceptance/entrypoint && go test -timeout=${timeout} -v ./${RKE2_TESTCASE}/... ${RKE2_TEST_ARGS}\""
             }
         } finally {
             stage('Cleanup') {
diff --git a/tests/acceptance/shared/scripts/build.sh b/tests/acceptance/scripts/build.sh
similarity index 100%
rename from tests/acceptance/shared/scripts/build.sh
rename to tests/acceptance/scripts/build.sh
diff --git a/tests/acceptance/shared/scripts/configure.sh b/tests/acceptance/scripts/configure.sh
similarity index 100%
rename from tests/acceptance/shared/scripts/configure.sh
rename to tests/acceptance/scripts/configure.sh
diff --git a/tests/acceptance/shared/scripts/delete_resources.sh b/tests/acceptance/scripts/delete_resources.sh
similarity index 100%
rename from tests/acceptance/shared/scripts/delete_resources.sh
rename to tests/acceptance/scripts/delete_resources.sh
diff --git a/tests/acceptance/shared/util/aux.go b/tests/acceptance/shared/aux.go
similarity index 99%
rename from tests/acceptance/shared/util/aux.go
rename to tests/acceptance/shared/aux.go
index c84cd43d4ea..eb1abb89e2d 100644
--- a/tests/acceptance/shared/util/aux.go
+++ b/tests/acceptance/shared/aux.go
@@ -1,4 +1,4 @@
-package util
+package shared
 
 import (
 	"bytes"
diff --git a/tests/acceptance/shared/util/cluster.go b/tests/acceptance/shared/cluster.go
similarity index 87%
rename from tests/acceptance/shared/util/cluster.go
rename to tests/acceptance/shared/cluster.go
index e0225aa6f43..808b8fd1116 100644
--- a/tests/acceptance/shared/util/cluster.go
+++ b/tests/acceptance/shared/cluster.go
@@ -1,4 +1,4 @@
-package util
+package shared
 
 import (
 	"fmt"
@@ -9,6 +9,12 @@ import (
 	"github.com/onsi/gomega"
 )
 
+var (
+	KubeConfigFile string
+	AwsUser string
+	AccessKey string
+)
+
 type Node struct {
 	Name string
 	Status string
@@ -41,7 +47,7 @@ func ManageWorkload(action, workload string) (string, error) {
 	var res string
 	var err error
 
-	resourceDir := BasePath() + "/fixtures/workloads/"
+	resourceDir := BasePath() + "/acceptance/workloads/"
 
 	files, err := os.ReadDir(resourceDir)
 	if err != nil {
@@ -94,7 +100,7 @@ func deleteWorkload(workload, filename string) (string, error) {
 
 // IsWorkloadDeleted returns true if the workload is deleted.
 func IsWorkloadDeleted(workload string) (bool, error) {
-	res, err := RunCommandHost(GetAll + KubeConfigFile)
+	res, err := RunCommandHost("kubectl get all -A --kubeconfig=" + KubeConfigFile)
 	if err != nil {
 		return false, err
 	}
@@ -160,17 +166,23 @@ func addKubectlCommand(action, source string, args []string) string {
 
 // Nodes returns the list of nodes in the cluster and parses the output with parseNodes.
 func Nodes(print bool) ([]Node, error) {
-	return parseNodes(GetNodesWide+KubeConfigFile, print)
+	return parseNodes("kubectl get nodes --no-headers -o wide --kubeconfig="+KubeConfigFile, print)
 }
 
 // WorkerNodes returns the list of worker nodes in the cluster.
 func WorkerNodes(print bool) ([]Node, error) {
-	return parseNodes(GetWorkerNodes+KubeConfigFile+GrepNoExec, print)
+	return parseNodes("kubectl get node -o jsonpath='{range .items[*]}{@.metadata.name} "+
+		"{@.status.conditions[-1].type} "+
+		"{@.status.nodeInfo.kubeletVersion} "+
+		"{@.status.addresses[?(@.type==\"InternalIP\")].address} "+
+		"{@.status.addresses[?(@.type==\"ExternalIP\")].address} "+
+		"{@.spec.taints[*].effect}{\"\\n\"}{end}' "+
+		"--kubeconfig="+KubeConfigFile+GrepNoExec, print)
 }
 
 // Pods returns the list of pods in the cluster and parses the output with parsePods.
 func Pods(print bool) ([]Pod, error) {
-	return parsePods(GetPodsWide+KubeConfigFile, print)
+	return parsePods("kubectl get pods -o wide --no-headers -A --kubeconfig="+KubeConfigFile, print)
 }
 
 // FetchClusterIP returns the cluster IP and port of the service.
@@ -195,7 +207,8 @@ func FetchClusterIP(
 // FetchNodeExternalIP returns the external IP of the nodes.
 func FetchNodeExternalIP() []string {
-	res, _ := RunCommandHost(GetNodesExternalIp + KubeConfigFile)
+	res, _ := RunCommandHost("kubectl get nodes --output=jsonpath='{.items[*].status.addresses[?(@.type==\"ExternalIP\")].address}' " +
+		"--kubeconfig=" + KubeConfigFile)
 	nodeExternalIP := strings.Trim(res, " ")
 	nodeExternalIPs := strings.Split(nodeExternalIP, " ")
diff --git a/tests/acceptance/shared/util/constants.go b/tests/acceptance/shared/util/constants.go
deleted file mode 100644
index 1cb149f1f7e..00000000000
--- a/tests/acceptance/shared/util/constants.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package util
-
-import (
-	"flag"
-)
-
-// global configurations
-var (
-	Destroy = flag.Bool("destroy", false, "a bool")
-	KubeConfigFile string
-	ServerIPs string
-	AgentIPs string
-	NumServers int
-	NumAgents int
-	AwsUser string
-	AccessKey string
-
-	ExecDnsUtils = "kubectl exec -n auto-dns -t dnsutils --kubeconfig="
-	GetAll = "kubectl get all -A --kubeconfig="
-	GetNodesWide = "kubectl get nodes --no-headers -o wide --kubeconfig="
-	GetPodsWide = "kubectl get pods -o wide --no-headers -A --kubeconfig="
-	GetNodesExternalIp = "kubectl get nodes --output=jsonpath='{.items[*].status.addresses[?(@.type==\"ExternalIP\")].address}' --kubeconfig="
-	GetCoreDNSdeployImage = "kubectl get deploy rke2-coredns-rke2-coredns -n kube-system -o jsonpath='{.spec.template.spec.containers[?(@.name==\"coredns\")].image}'"
-	GetWorkerNodes = "kubectl get node -o jsonpath='{range .items[*]}{@.metadata.name} " +
-		"{@.status.conditions[-1].type} " +
-		"{@.status.nodeInfo.kubeletVersion} " +
-		"{@.status.addresses[?(@.type==\"InternalIP\")].address} " +
-		"{@.status.addresses[?(@.type==\"ExternalIP\")].address} " +
-		"{@.spec.taints[*].effect}{\"\\n\"}{end}' " +
-		"--kubeconfig="
-	Running = "Running"
-	Nslookup = "kubernetes.default.svc.cluster.local"
-)
diff --git a/tests/acceptance/fixtures/workloads/clusterip.yaml b/tests/acceptance/workloads/clusterip.yaml
similarity index 100%
rename from tests/acceptance/fixtures/workloads/clusterip.yaml
rename to tests/acceptance/workloads/clusterip.yaml
diff --git a/tests/acceptance/fixtures/workloads/daemonset.yaml b/tests/acceptance/workloads/daemonset.yaml
similarity index 100%
rename from tests/acceptance/fixtures/workloads/daemonset.yaml
rename to tests/acceptance/workloads/daemonset.yaml
diff --git a/tests/acceptance/fixtures/workloads/dnsutils.yaml b/tests/acceptance/workloads/dnsutils.yaml
similarity index 100%
rename from tests/acceptance/fixtures/workloads/dnsutils.yaml
rename to tests/acceptance/workloads/dnsutils.yaml
diff --git a/tests/acceptance/fixtures/workloads/ingress.yaml b/tests/acceptance/workloads/ingress.yaml
similarity index 100%
rename from tests/acceptance/fixtures/workloads/ingress.yaml
rename to tests/acceptance/workloads/ingress.yaml
diff --git a/tests/acceptance/fixtures/workloads/nodeport.yaml b/tests/acceptance/workloads/nodeport.yaml
similarity index 100%
rename from tests/acceptance/fixtures/workloads/nodeport.yaml
rename to tests/acceptance/workloads/nodeport.yaml
diff --git a/tests/acceptance/fixtures/workloads/suc.yaml b/tests/acceptance/workloads/suc.yaml
similarity index 100%
rename from tests/acceptance/fixtures/workloads/suc.yaml
rename to tests/acceptance/workloads/suc.yaml
diff --git a/tests/acceptance/fixtures/workloads/traefiklogs.yaml b/tests/acceptance/workloads/traefiklogs.yaml
similarity index 100%
rename from tests/acceptance/fixtures/workloads/traefiklogs.yaml
rename to tests/acceptance/workloads/traefiklogs.yaml