From 907c6baf354ceef685a4a6ec3ed91da02a759405 Mon Sep 17 00:00:00 2001 From: Francisco Date: Fri, 26 May 2023 19:59:48 -0300 Subject: [PATCH] feat: add acceptance framework Signed-off-by: Francisco --- tests/acceptance/.gitignore | 8 + .../{terraform => acceptance}/.golangci.yaml | 5 +- tests/acceptance/Makefile | 126 +++++ tests/acceptance/README.md | 427 +++++++++++++++++ tests/{terraform => acceptance}/config.mk | 2 +- tests/acceptance/core/service/assert/host.go | 34 ++ tests/acceptance/core/service/assert/node.go | 80 ++++ tests/acceptance/core/service/assert/pod.go | 82 ++++ .../core/service/assert/validate.go | 94 ++++ tests/acceptance/core/service/constants.go | 11 + .../core/service/customflag/model.go | 115 +++++ .../core/service/factory/cluster.go | 88 ++++ .../core/service/template/helper.go | 104 ++++ .../acceptance/core/service/template/model.go | 36 ++ .../core/service/template/processor.go | 99 ++++ .../core/service/template/versiontemplate.go | 39 ++ tests/acceptance/core/testcase/cluster.go | 33 ++ tests/acceptance/core/testcase/coredns.go | 34 ++ tests/acceptance/core/testcase/daemonset.go | 23 + tests/acceptance/core/testcase/ingressdns.go | 61 +++ tests/acceptance/core/testcase/node.go | 35 ++ tests/acceptance/core/testcase/pod.go | 48 ++ tests/acceptance/core/testcase/service.go | 56 +++ .../core/testcase/upgradecluster.go | 165 +++++++ .../createcluster/createcluster_suite_test.go | 28 ++ .../createcluster/createcluster_test.go | 75 +++ .../upgradecluster_suite_test.go | 39 ++ .../upgradecluster/upgrademanual_test.go | 117 +++++ .../upgradecluster/upgradesuc_test.go | 110 +++++ .../versionbump/version_suite_test.go | 58 +++ .../versionbump/versionbump_test.go | 79 +++ .../versionbump/versioncoredns_test.go | 72 +++ .../versionbump/versionrunc_test.go | 63 +++ .../fixtures/workloads}/clusterip.yaml | 0 .../fixtures/workloads}/daemonset.yaml | 0 .../fixtures/workloads}/dnsutils.yaml | 0 .../fixtures/workloads}/ingress.yaml | 0 
.../fixtures/workloads}/nodeport.yaml | 0 .../fixtures/workloads}/suc.yaml | 0 .../fixtures/workloads/traefiklogs.yaml | 13 + .../fixtures/workloads}/upgrade-plan.yaml | 12 +- tests/{terraform => acceptance}/go.mod | 10 +- tests/{terraform => acceptance}/go.sum | 18 +- .../modules/install}/define_node_role.sh | 0 .../modules/install}/install_rke2_master.sh | 0 .../modules/install}/join_rke2_agent.sh | 2 +- .../modules/install}/join_rke2_master.sh | 2 +- .../modules/install}/optional_write_files.sh | 2 +- .../{terraform => acceptance}/modules/main.tf | 2 +- .../modules/master/instances_server.tf | 12 +- .../modules/master/outputs.tf | 0 .../modules/master/variables.tf | 2 +- .../modules/outputs.tf | 2 +- tests/acceptance/modules/providers.tf | 3 + .../modules/variables.tf | 2 +- .../modules/worker/instances_worker.tf | 2 +- .../modules/worker/outputs.tf | 2 +- .../modules/worker/variables.tf | 2 +- .../shared}/scripts/Dockerfile.build | 0 .../shared}/scripts/Jenkinsfile | 16 +- .../shared}/scripts/build.sh | 2 +- .../shared}/scripts/configure.sh | 0 .../shared/scripts/delete_resources.sh | 86 ++++ tests/acceptance/shared/util/aux.go | 204 ++++++++ tests/acceptance/shared/util/cluster.go | 286 +++++++++++ tests/acceptance/shared/util/constants.go | 71 +++ tests/terraform/.gitignore | 4 - tests/terraform/Makefile | 75 --- tests/terraform/README.md | 107 ----- .../terraform/createcluster/createcluster.go | 92 ---- .../createcluster/createcluster_test.go | 259 ---------- .../manual_upgrade/upgradecluster_test.go | 387 --------------- tests/terraform/modules/providers.tf | 3 - tests/terraform/scripts/delete_resources.sh | 86 ---- tests/terraform/suc_upgrade/upgradecluster.go | 28 -- .../suc_upgrade/upgradecluster_test.go | 453 ------------------ tests/terraform/testutils.go | 292 ----------- .../upgradecluster/upgradecluster_test.go | 453 ------------------ 78 files changed, 3154 insertions(+), 2284 deletions(-) create mode 100644 tests/acceptance/.gitignore rename 
tests/{terraform => acceptance}/.golangci.yaml (95%) create mode 100644 tests/acceptance/Makefile create mode 100644 tests/acceptance/README.md rename tests/{terraform => acceptance}/config.mk (82%) create mode 100644 tests/acceptance/core/service/assert/host.go create mode 100644 tests/acceptance/core/service/assert/node.go create mode 100644 tests/acceptance/core/service/assert/pod.go create mode 100644 tests/acceptance/core/service/assert/validate.go create mode 100644 tests/acceptance/core/service/constants.go create mode 100644 tests/acceptance/core/service/customflag/model.go create mode 100644 tests/acceptance/core/service/factory/cluster.go create mode 100644 tests/acceptance/core/service/template/helper.go create mode 100644 tests/acceptance/core/service/template/model.go create mode 100644 tests/acceptance/core/service/template/processor.go create mode 100644 tests/acceptance/core/service/template/versiontemplate.go create mode 100644 tests/acceptance/core/testcase/cluster.go create mode 100644 tests/acceptance/core/testcase/coredns.go create mode 100644 tests/acceptance/core/testcase/daemonset.go create mode 100644 tests/acceptance/core/testcase/ingressdns.go create mode 100644 tests/acceptance/core/testcase/node.go create mode 100644 tests/acceptance/core/testcase/pod.go create mode 100644 tests/acceptance/core/testcase/service.go create mode 100644 tests/acceptance/core/testcase/upgradecluster.go create mode 100644 tests/acceptance/entrypoint/createcluster/createcluster_suite_test.go create mode 100644 tests/acceptance/entrypoint/createcluster/createcluster_test.go create mode 100644 tests/acceptance/entrypoint/upgradecluster/upgradecluster_suite_test.go create mode 100644 tests/acceptance/entrypoint/upgradecluster/upgrademanual_test.go create mode 100644 tests/acceptance/entrypoint/upgradecluster/upgradesuc_test.go create mode 100644 tests/acceptance/entrypoint/versionbump/version_suite_test.go create mode 100644 
tests/acceptance/entrypoint/versionbump/versionbump_test.go create mode 100644 tests/acceptance/entrypoint/versionbump/versioncoredns_test.go create mode 100644 tests/acceptance/entrypoint/versionbump/versionrunc_test.go rename tests/{terraform/resource_files => acceptance/fixtures/workloads}/clusterip.yaml (100%) rename tests/{terraform/resource_files => acceptance/fixtures/workloads}/daemonset.yaml (100%) rename tests/{terraform/resource_files => acceptance/fixtures/workloads}/dnsutils.yaml (100%) rename tests/{terraform/resource_files => acceptance/fixtures/workloads}/ingress.yaml (100%) rename tests/{terraform/resource_files => acceptance/fixtures/workloads}/nodeport.yaml (100%) rename tests/{terraform/resource_files => acceptance/fixtures/workloads}/suc.yaml (100%) create mode 100644 tests/acceptance/fixtures/workloads/traefiklogs.yaml rename tests/{terraform/resource_files => acceptance/fixtures/workloads}/upgrade-plan.yaml (83%) rename tests/{terraform => acceptance}/go.mod (93%) rename tests/{terraform => acceptance}/go.sum (99%) rename tests/{terraform/modules => acceptance/modules/install}/define_node_role.sh (100%) rename tests/{terraform/modules => acceptance/modules/install}/install_rke2_master.sh (100%) mode change 100755 => 100644 rename tests/{terraform/modules => acceptance/modules/install}/join_rke2_agent.sh (98%) mode change 100755 => 100644 rename tests/{terraform/modules => acceptance/modules/install}/join_rke2_master.sh (98%) mode change 100755 => 100644 rename tests/{terraform/modules => acceptance/modules/install}/optional_write_files.sh (99%) rename tests/{terraform => acceptance}/modules/main.tf (99%) rename tests/{terraform => acceptance}/modules/master/instances_server.tf (97%) rename tests/{terraform => acceptance}/modules/master/outputs.tf (100%) rename tests/{terraform => acceptance}/modules/master/variables.tf (97%) rename tests/{terraform => acceptance}/modules/outputs.tf (99%) create mode 100644 
tests/acceptance/modules/providers.tf rename tests/{terraform => acceptance}/modules/variables.tf (99%) rename tests/{terraform => acceptance}/modules/worker/instances_worker.tf (97%) rename tests/{terraform => acceptance}/modules/worker/outputs.tf (97%) rename tests/{terraform => acceptance}/modules/worker/variables.tf (95%) rename tests/{terraform => acceptance/shared}/scripts/Dockerfile.build (100%) rename tests/{terraform => acceptance/shared}/scripts/Jenkinsfile (85%) rename tests/{terraform => acceptance/shared}/scripts/build.sh (74%) rename tests/{terraform => acceptance/shared}/scripts/configure.sh (100%) create mode 100755 tests/acceptance/shared/scripts/delete_resources.sh create mode 100644 tests/acceptance/shared/util/aux.go create mode 100644 tests/acceptance/shared/util/cluster.go create mode 100644 tests/acceptance/shared/util/constants.go delete mode 100644 tests/terraform/.gitignore delete mode 100644 tests/terraform/Makefile delete mode 100644 tests/terraform/README.md delete mode 100644 tests/terraform/createcluster/createcluster.go delete mode 100644 tests/terraform/createcluster/createcluster_test.go delete mode 100644 tests/terraform/manual_upgrade/upgradecluster_test.go delete mode 100644 tests/terraform/modules/providers.tf delete mode 100755 tests/terraform/scripts/delete_resources.sh delete mode 100644 tests/terraform/suc_upgrade/upgradecluster.go delete mode 100644 tests/terraform/suc_upgrade/upgradecluster_test.go delete mode 100644 tests/terraform/testutils.go delete mode 100644 tests/terraform/upgradecluster/upgradecluster_test.go diff --git a/tests/acceptance/.gitignore b/tests/acceptance/.gitignore new file mode 100644 index 00000000000..574916ee83a --- /dev/null +++ b/tests/acceptance/.gitignore @@ -0,0 +1,8 @@ +.ssh/ +config/ +/tests/.vscode +/.idea/ +/modules/.terraform/ +*.terraform* +*.tfstate* +*plan.yaml \ No newline at end of file diff --git a/tests/terraform/.golangci.yaml b/tests/acceptance/.golangci.yaml similarity index 
95% rename from tests/terraform/.golangci.yaml rename to tests/acceptance/.golangci.yaml index 273f792582f..e3f678b5a1f 100644 --- a/tests/terraform/.golangci.yaml +++ b/tests/acceptance/.golangci.yaml @@ -28,7 +28,7 @@ linters-settings: ignore-generated-header: true rules: - name: line-length-limit - arguments: [100] + arguments: [110] - name: cognitive-complexity arguments: [10] - name: empty-lines @@ -37,7 +37,6 @@ linters-settings: - name: blank-imports - name: confusing-naming - name: confusing-results - - name: context-as-argument - name: duplicated-imports - name: early-return - name: empty-block @@ -46,7 +45,7 @@ linters-settings: - name: error-strings - name: errorf - name: exported - - name: flag-parameter + - name: customflag-parameter - name: get-return - name: if-return - name: increment-decrement diff --git a/tests/acceptance/Makefile b/tests/acceptance/Makefile new file mode 100644 index 00000000000..f4383965f4f --- /dev/null +++ b/tests/acceptance/Makefile @@ -0,0 +1,126 @@ +##========================= Acceptance Tests =========================# +include ./config.mk + +TAGNAME ?= default +test-env-up: + @cd ../.. && docker build . -q -f ./tests/acceptance/shared/scripts/Dockerfile.build -t rke2-automated-${TAGNAME} + +# -d +.PHONY: test-run +test-run: + docker run --name rke2-automated-test-${IMGNAME} -t \ + -e AWS_ACCESS_KEY_ID="${AWS_ACCESS_KEY_ID}" \ + -e AWS_SECRET_ACCESS_KEY="${AWS_SECRET_ACCESS_KEY}" \ + -v ${ACCESS_KEY_LOCAL}:/go/src/github.com/rancher/rke2/tests/acceptance/modules/config/.ssh/aws_key.pem \ + rke2-automated-${TAGNAME} sh -c 'cd ./tests/acceptance/entrypoint; \ + if [ -n "${TESTDIR}" ]; then \ + if [ "${TESTDIR}" = "upgradecluster" ]; then \ + if [ "${TESTTAG}" = "upgradesuc" ]; then \ + go test -timeout=45m -v -tags=upgradesuc ./upgradecluster/... -upgradeVersionSUC "${ARGVALUE}"; \ + elif [ "${TESTTAG}" = "upgrademanual" ]; then \ + go test -timeout=45m -v -tags=upgrademanual ./upgradecluster/... 
-installtype "${INSTALLTYPE}"; \ + fi; \ + elif [ "${TESTDIR}" = "versionbump" ]; then \ + if [ "${TESTTAG}" = "versionbump" ]; then \ + go test -timeout=45m -v -tags=versionbump ./versionbump/... -cmdNode "${CMDNODE}" -expectedValueNode "${VALUENODE}" \ + -expectedValueUpgradedNode "${VALUENODEUPGRADED}" -cmdHost "${CMDHOST}" -expectedValueHost "${VALUEHOST}" \ + -expectedValueUpgradedHost "${VALUEHOSTUPGRADED}" -installUpgradeFlag "${INSTALLTYPE}" -testCase "${TESTCASE}" \ + -deployWorkload "${DEPLOYWORKLOAD}"; \ + -description "${DESCRIPTION}"; \ + elif [ "${TESTTAG}" = "coredns" ]; then \ + go test -timeout=45m -v -tags=coredns ./versionbump/... -expectedValueNode "${VALUENODE}" \ + -expectedValueUpgradedNode "${VALUENODEUPGRADED}" -expectedValueHost "${VALUEHOST}" \ + -expectedValueUpgradedHost "${VALUEHOSTUPGRADED}" -installUpgradeFlag "${INSTALLTYPE}"; \ + elif [ "${TESTTAG}" = "runc" ]; then \ + go test -timeout=45m -v -tags=runc ./versionbump/... \ + -expectedValueNode ${VALUENODE} \ + -expectedValueUpgradedNode ${VALUENODEUPGRADED} \ + -installUpgradeFlag ${INSTALLTYPE}; \ + fi; \ + fi; \ + elif [ -z "${TESTDIR}" ]; then \ + go test -timeout=45m -v ./createcluster/...; \ + fi;' + + +.PHONY: test-logs +test-logs: + @docker logs -f rke2-automated-test-${IMGNAME} + + +.PHONY: test-env-down +test-env-down: + @echo "Removing containers and images" + @docker stop $$(docker ps -a -q --filter="name=rke2-automated*") + @docker rm $$(docker ps -a -q --filter="name=rke2-automated*") + @docker rmi $$(docker images -q --filter="reference=rke2-automated*") + + +.PHONY: test-env-clean +test-env-clean: + @./shared/scripts/delete_resources.sh + + +.PHONY: test-complete +test-complete: test-env-clean test-env-down remove-tf-state test-env-up test-run + + +.PHONY: remove-tf-state +remove-tf-state: + @rm -rf ./modules/.terraform + @rm -rf ./modules/.terraform.lock.hcl ./modules/terraform.tfstate ./modules/terraform.tfstate.backup + + +#======================== Run acceptance 
tests locally =========================# + +.PHONY: test-create +test-create: + @go test -timeout=45m -v ./entrypoint/createcluster/... + + +.PHONY: test-upgrade-suc +test-upgrade-suc: + @go test -timeout=45m -v -tags=upgradesuc ./entrypoint/upgradecluster/... -upgradeVersionSUC ${ARGVALUE} + + +.PHONY: test-upgrade-manual +test-upgrade-manual: + @go test -timeout=45m -v -tags=upgrademanual ./entrypoint/upgradecluster/... -installtype ${INSTALLTYPE} + + +.PHONY: test-version-runc +test-version-runc: + @go test -timeout=45m -v -tags=runc ./entrypoint/versionbump/... \ + -expectedValueNode ${VALUENODE} \ + -expectedValueUpgradedNode ${VALUENODEUPGRADED} \ + -installUpgradeFlag ${INSTALLTYPE} \ + + + +.PHONY: test-version-coredns +test-version-coredns: + @go test -timeout=45m -v -tags=coredns ./entrypoint/versionbump/... \ + -expectedValueHost ${VALUEHOST} \ + -expectedValueUpgradedHost ${VALUEHOSTUPGRADED} \ + -installUpgradeFlag ${INSTALLTYPE} \ + + +.PHONY: test-version-bump +test-version-bump: + @go test -timeout=45m -v -tags=versionbump ./entrypoint/versionbump/... 
\ + -cmdNode "${CMDNODE}" \ + -expectedValueNode ${VALUENODE} \ + -expectedValueUpgradedNode ${VALUENODEUPGRADED} \ + -cmdHost "${CMDHOST}" \ + -expectedValueHost ${VALUEHOST} \ + -expectedValueUpgradedHost ${VALUEHOSTUPGRADED} \ + -installUpgradeFlag ${INSTALLTYPE} \ + -testCase ${TESTCASE} \ + -deployWorkload ${DEPLOYWORKLOAD} + + +#========================= TestCode Static Quality Check =========================# +.PHONY: vet-lint ## Run locally only inside Tests package +vet-lint: + @echo "Running go vet and lint" + @go vet ./${TESTDIR} && golangci-lint run --tests \ No newline at end of file diff --git a/tests/acceptance/README.md b/tests/acceptance/README.md new file mode 100644 index 00000000000..63279f78d16 --- /dev/null +++ b/tests/acceptance/README.md @@ -0,0 +1,427 @@ +## Acceptance Test Framework + +The acceptance tests are a customizable way to create clusters and perform validations on them such that the requirements of specific features and functions can be validated. + +- It relies on [Terraform](https://www.terraform.io/) to provide the underlying cluster configuration. +- It uses [Ginkgo](https://onsi.github.io/ginkgo/) and [Gomega](https://onsi.github.io/gomega/) as assertion framework. 
+ + +## Architecture +- For better maintenance, readability and productivity we encourage max of separation of concerns and loose coupling between packages so inner packages should not depend on outer packages + +### Packages: +```bash +./acceptance +│ +├── core +│ └───── Place where resides the logic and services for it +| +├── entrypoint +│ └───── Where is located the entrypoint for tests execution, separated by test runs and test suites +│ +├── fixtures +│ └───── Place where resides the fixtures for tests +│ +├── modules +│ └───── Terraform modules and configurations +│ +├── shared +│ └───── shared and reusable functions, workloads, constants, and scripts + +``` + + + +### Explanation: + +- `Core` +``` + Service: + +Act: Acts as a provider for customizations across framework +Responsibility: Should not depend on any outer layer only in the core itself, the idea is to provide not rely on. + + + Testcase: + +Act: Acts as a innermost layer where the main logic (test implementations) is handled. +Responsibility: Encapsulate test logic and should not depend on any outer layer + +``` + +- `Entrypoint` +```` +Act: Acts as the one of the outer layer to receive the input to start test execution +Responsibility: Should not need to implement any logic and only focus on orchestrating +```` + +- `Fixtures` +``` +Act: It acts as a provider for test fixtures +Responsibility: Totally independent from any other layer and should only provide +``` + +- `Modules` +``` +Act: It acts as the infra to provide the terraform modules and configurations +Responsibility: Only provides indirectly for all, should not need the knowledge of any test logic or have dependencies from internal layers. +``` + +- `Shared` +``` +Act: It acts as an intermediate module providing shared and reusable functions, constants, and scripts +Responsibility: Should not need the knowledge or "external" dependency at all, provides for all. 
+``` + +#### PS: "External" and "Outer" dependency here in this context is considered any other package within the acceptance framework. + +------------------- + +### `Template Bump Version Model ` + +- We have a template model interface for testing bump versions, the idea is to provide a simple and direct way to test bump of version using go test tool. + + +```You can test that like:``` + +- Adding one version or commit and ran some commands on it and check it against respective expected values then upgrade and repeat the same commands and check the respective new (or not) expected values. + + +```How can I do that?``` + +- Step 1: Add your desired first version or commit that you want to use on `local.tfvars` file on the vars `rke2_version` and `install_mode` +- Step 2: Have the commands you need to run and the expected output from them +- Step 3: Have a version or commit that you want to upgrade to. +- Step 4: Create your go test file in `acceptance/entrypoint/versionbump/versionbump{mytestname}.go`. +- Step 5: Get the template from `acceptance/entrypoint/versionbump/versionbump.go` and copy it to your test file. +- Step 6: Fill the template with your data ( RunCmdNode and RunCmdHost) with your respective commands. +- Step 7: On the TestConfig field you can add another test case that we already have or a newly created one. +- Step 8: Create the go test command and the make command to run it. +- Step 9: Run the command and wait for results. +- Step 10: (WIP) Export your customizable report. 
+ + +------------------- +- RunCmdNode: + Commands like: + - $ `curl ...` + - $ `sudo chmod ...` + - $ `sudo systemctl ...` + - $ `grep ...` + - $ `rke2 --version` + + +- RunCmdHost: + Basically commands like: + - $ `kubectl ...` + - $ `helm ...` + + + +Available arguments to create your command with examples: +```` +- $ -cmdHost kubectl describe pod -n kube-system local-path-provisioner-, | grep -i Image" +- $ -expectedValueHost "v0.0.21" +- $ -expectedValueUpgradedHost "v0.0.24" +- $ -cmdNode "rke2 --version" +- $ -expectedValueNode "v1.25.2+k3s1" +- $ -expectedValuesUpgradedNode "v1.26.4-rc1+rke2r1" +- $ -upgradeVersionSUC "v1.26.4-rc1+rke2r1" +- $ -installUpgradeFlag=INSTALL_RKE2_COMMIT=257fa2c54cda332e42b8aae248c152f4d1898218 +- $ -deployWorkload=true +- $ -testCase=TestCaseName +- $ -description "Description of your test" +```` + +Example of an execution considering that the `commands` are already placed or inside your test function or the *template itself (example below the command): +```bash + go test -v -timeout=45m -tags=coredns ./entrypoint/versionbump/... \ + -expectedValueHost "v1.9.3" \ + -expectedValueUpgradedHost "v1.10.1" \ + -expectedValueNode "v1.25.9+rke2r1" \ + -expectedValueUpgradedNode "v1.26.4-rc1+rke2r1" \ + -installUpgradeFlag INSTALL_RKE2_VERSION=v1.25.9+rke2r1 + +```` +PS: If you need to send more than one command at once split them with " , " + +#### `*template with commands and testcase added:` +```go +- Values passed in code. 
+------------------------------------------------------- +- util.GetCoreDNSdeployImage +- testcase.TestCoredns +------------------------------------------------------- + + +Value passed in command line +------------------------------------------------------- +-expectedValueHost "v1.9.3" +-expectedValueUpgradedHost "v1.10.1" +------------------------------------------------------- + + + template.VersionTemplate(template.VersionTestTemplate{ + Description: "Test CoreDNS bump", + TestCombination: &template.RunCmd{ + RunOnHost: []template.TestMap{ + { + Cmd: util.GetCoreDNSdeployImage, + ExpectedValue: service.ExpectedValueHost, + ExpectedValueUpgrade: service.ExpectedValueUpgradedHost, + }, + }, + }, + InstallUpgrade: customflag.InstallUpgradeFlag, + TestConfig: &template.TestConfig{ + TestFunc: testcase.TestCoredns, + DeployWorkload: true, + }, + }) +}) + +```` + + +#### You can also run a totally parametrized test with the template, just copy and paste the template and call everything as flags like that: +- Template +```` go + template.VersionTemplate(GinkgoT(), template.VersionTestTemplate{ + Description: util.Description, + TestCombination: &template.RunCmd{ + RunOnNode: []template.TestMap{ + { + Cmd: util.CmdNode, + ExpectedValue: util.ExpectedValueNode, + ExpectedValueUpgrade: util.ExpectedValueUpgradedNode, + }, + }, + RunOnHost: []template.TestMap{ + { + Cmd: util.CmdHost, + ExpectedValue: util.ExpectedValueHost, + ExpectedValueUpgrade: util.ExpectedValueUpgradedHost, + }, + }, + }, + InstallUpgrade: util.InstallUpgradeFlag, + TestConfig: &template.TestConfig{ + TestFunc: template.TestCase(util.TestCase.TestFunc), + DeployWorkload: util.TestCase.DeployWorkload, + }, + }) + }) +```` + +- Command +```bash +go test -timeout=45m -v -tags=versionbump ./entrypoint/versionbump/... 
\ + -cmdNode "rke2 --version" \ + -expectedValueNode "v1.25.3+rke2r1" \ + -expectedValueUpgradedNode "v1.25.9+rke2r1" \ + -cmdHost "kubectl get deploy rke2-coredns-rke2-coredns -n kube-system -o jsonpath='{.spec.template.spec.containers[?(@.name==\"coredns\")].image}'" \ + -expectedValueHost "v1.9.3" \ + -expectedValueUpgradedHost "v1.10.1" \ + -installUpgradeFlag INSTALL_RKE2_VERSION=v1.25.9+rke2r1 \ + -testCase "TestCoredns" \ + -deployWorkload true + +```` + +#### We also have this on the `makefile` to make things easier to run just adding the values, please see below on the makefile section + + +----- +#### Testcase naming convention: +- All tests should be placed under `tests/acceptance/testcase/`. +- All test functions should be named: `Test`. + + + +## Running + +- Before running the tests, you should creat local.tfvars file in `./tests/acceptance/modules/config/local.tfvars`. There is some information there to get you started, but the empty variables should be filled in appropriately per your AWS environment. + +- Please make sure to export your correct AWS credentials before running the tests. e.g: +```bash +export AWS_ACCESS_KEY_ID= +export AWS_SECRET_ACCESS_KEY= +``` + +- The local.tfvars split roles section should be strictly followed to not cause any false positives or negatives on tests + +- Please also when creating tf var resource_name, make sure that you do not have any instances from other automations with the same name to avoid deleting wrong resources + + +*** + +Tests can be run individually per package or per test tags from acceptance package: +```bash +go test -timeout=45m -v ./entrypoint/$PACKAGE_NAME/... + +go test -timeout=45m -v ./entrypoint/upgradecluster/... -installtype INSTALL_RKE2_VERSION=v1.25.8+rke2r1 -upgradeVersion v1.25.8+rke2r1 + +go test -timeout=45m -v -tags=upgrademanual ./entrypoint/upgradecluster/... -installtype INSTALL_RKE2_VERSION=v1.25.8+rke2r1 + +go test -timeout=45m -v -tags=upgradesuc ./entrypoint/upgradecluster/... 
-upgradeVersionSUC v1.25.8+rke2r1 + +``` +Test flags: +``` + ${upgradeVersion} version to upgrade to + -upgradeVersion=v1.26.2+rke2r1 + + ${installType} type of installation (version or commit) + desired value + -installType=version or commit +``` +Test tags: +``` + -tags=upgradesuc + -tags=upgrademanual + -tags=versionbump + -tags=runc + -tags=coredns +``` +### Run with `Makefile` through acceptance package: +```bash +- On the first run with make and docker please delete your .terraform folder, terraform.tfstate and terraform.hcl.lock file + +Args: +*Most of args are optional so you can fit to your use case. + +- ${IMGNAME} append any string to the end of image name +- ${TAGNAME} append any string to the end of tag name +- ${ARGNAME} name of the arg to pass to the test +- ${ARGVALUE} value of the arg to pass to the test +- ${TESTDIR} path to the test directory +- ${TESTFILE} path to the test file +- ${TAGTEST} name of the tag function from suite ( -tags=upgradesuc or -tags=upgrademanual ) +- ${TESTCASE} name of the testcase to run +- ${DEPLOYWORKLOAD} true or false to deploy workload +- ${CMDHOST} command to run on host +- ${VALUEHOST} value to check on host +- ${VALUEHOSTUPGRADED} value to check on host after upgrade +- ${CMDNODE} command to run on node +- ${VALUENODE} value to check on node +- ${VALUENODEUPGRADED} value to check on node after upgrade +- ${INSTALLTYPE} type of installation (version or commit) + desired value + + + +Commands: +$ make test-env-up # create the image from Dockerfile.build +$ make test-run # runs create and upgrade cluster by passing the argname and argvalue +$ make test-env-down # removes the image and container by prefix +$ make test-env-clean # removes instances and resources created by testcase +$ make test-logs # prints logs from container the testcase +$ make test-create # runs create cluster test locally +$ make test-version-runc # runs version runc test locally +$ make test-version-coredns # runs version coredns test locally 
+$ make test-upgrade-suc # runs upgrade via SUC +$ make test-upgrade-manual # runs upgrade manually +$ make test-version-bump # runs version bump test locally +$ make test-run # runs create and upgrade cluster by passing the argname and argvalue +$ make remove-tf-state # removes acceptance state dir and files +$ make test-suite # runs all testcase locally in sequence not using the same state +$ make vet-lint # runs go vet and go lint +``` + +### Examples with docker: +``` +- Create an image tagged +$ make test-env-up TAGNAME=ubuntu + +- Run upgrade cluster test with `${IMGNAME}` and `${TAGNAME}` +$ make test-run IMGNAME=2 TAGNAME=ubuntu + + +- Run create and upgrade cluster just adding `INSTALLTYPE` flag to upgrade +$ make test-run + + +- Run version bump test upgrading with commit id +$ make test-run IMGNAME=x \ + TAGNAME=y \ + TESTDIR=versionbump \ + CMDNODE="rke2 --version" \ + VALUENODE="v1.26.2+rke2r1" \ + CMDHOST="kubectl get image..." \ + VALUEHOST="v0.0.21" \ + INSTALLTYPE=INSTALL_RKE2_COMMIT=257fa2c54cda332e42b8aae248c152f4d1898218 \ + TESTCASE=TestCaseName \ + DEPLOYWORKLOAD=true +```` + +### Examples to run locally: +```` +- Run create cluster test: +$ make test-create + +- Run upgrade cluster test: +$ make test-upgrade-manual INSTALLTYPE=v1.26.2+k3s1 + + +- Run bump version for coreDNS test +$ make test-version-bump \ + CMDNODE='rke2 --version' \ + VALUENODE="v1.25.3+rke2r1" \ + VALUENODEUPGRADED="v1.25.9+rke2r1" \ + CMDHOST='kubectl get deploy rke2-coredns-rke2-coredns -n kube-system -o json, | grep "rancher/hardened-coredns" \ + VALUEHOST="v1.9.3" \ + VALUEHOSTUPGRADED="v1.10.1" \ + INSTALLTYPE=INSTALL_RKE2_VERSION=v1.25.9+rke2r1 \ + TESTCASE="TestCoredns" \ + DEPLOYWORKLOAD=true + + + +- Run bump version for runc test +$ make test-version-runc \ + CMDHOST='kubectl get nodes' \ + VALUEHOST="Ready" \ + VALUEHOSTUPGRADED="Ready" \ + CMDNODE="(find /var/lib/rancher/rke2/data/ -type f -name runc -exec {} --version \\;)" \ + VALUENODE="1.1.4" \ + 
VALUENODEUPGRADED="1.1.5" \ + INSTALLTYPE=INSTALL_RKE2_VERSION=v1.25.9+rke2r1 + + + +- Logs from test +$ make tf-logs IMGNAME=1 + +- Run lint for a specific directory +$ make vet-lint TESTDIR=upgradecluster + +```` + + +### Running tests in parallel: + +- You can play around and have a lot of different test combinations like: +``` +- Build docker image with different TAGNAME="OS`s" + with different configurations( resource_name, node_os, versions, install type, nodes and etc) and have unique "IMGNAMES" + +- And in the meanwhile run also locally with different configuration while your dockers TAGNAME and IMGNAMES are running +``` + + +### In between tests: +```` +- If you want to run with same cluster do not delete ./tests/acceptance/modules/terraform.tfstate + .terraform.lock.hcl file after each test. + +- If you want to use new resources then make sure to delete the ./tests/acceptance/modules/terraform.tfstate + .terraform.lock.hcl file if you want to create a new cluster. +```` + +### Common Issues: +``` +- Issues related to terraform plugin please also delete the modules/.terraform folder +- Issues related to terraform failed to find local token , please also delete modules/.terraform folder +- In mac m1 maybe you need also to go to rke2/tests/terraform/modules and run `terraform init` to download the plugins +``` + +### Debugging +```` +To focus individual runs on specific test clauses, you can prefix with `F`. For example, in the [create cluster test](../tests/acceptance/entrypoint/createcluster_test.go), you can update the initial creation to be: `FIt("Starts up with no issues", func() {` in order to focus the run on only that clause. +Or use break points in your IDE. 
+```` diff --git a/tests/terraform/config.mk b/tests/acceptance/config.mk similarity index 82% rename from tests/terraform/config.mk rename to tests/acceptance/config.mk index 89a789f0280..5d6aa266741 100644 --- a/tests/terraform/config.mk +++ b/tests/acceptance/config.mk @@ -1,6 +1,6 @@ SHELL := /bin/bash -TFVARS_PATH := terraform/modules/config/local.tfvars +TFVARS_PATH := acceptance/modules/config/local.tfvars ifeq ($(wildcard ${TFVARS_PATH}),) RESOURCE_NAME := diff --git a/tests/acceptance/core/service/assert/host.go b/tests/acceptance/core/service/assert/host.go new file mode 100644 index 00000000000..847299d34a7 --- /dev/null +++ b/tests/acceptance/core/service/assert/host.go @@ -0,0 +1,34 @@ +package assert + +import ( + "fmt" + "strings" + + "github.com/rancher/rke2/tests/acceptance/shared/util" + + . "github.com/onsi/gomega" +) + +// CheckComponentCmdHost runs a command on the host and asserts that the value received contains the specified substring +// +// you can send multiple asserts from a cmd but all of them must be true +// +// need to send sKubeconfigFile +func CheckComponentCmdHost(cmd string, asserts ...string) { + Eventually(func() error { + fmt.Printf("\nExecuting cmd: %s\n", cmd) + res, err := util.RunCommandHost(cmd) + if err != nil { + return fmt.Errorf("error on RunCommandHost: %v", err) + } + + for _, assert := range asserts { + if !strings.Contains(res, assert) { + return fmt.Errorf("expected substring %q not found in result %q", assert, res) + } + fmt.Printf("\nResult: %s \n", res) + fmt.Printf("Matches with assert: %s \n", assert) + } + return nil + }, "420s", "5s").Should(Succeed()) +} diff --git a/tests/acceptance/core/service/assert/node.go b/tests/acceptance/core/service/assert/node.go new file mode 100644 index 00000000000..8666fd3c8e1 --- /dev/null +++ b/tests/acceptance/core/service/assert/node.go @@ -0,0 +1,80 @@ +package assert + +import ( + "fmt" + + "github.com/rancher/rke2/tests/acceptance/core/service/customflag" + 
"github.com/rancher/rke2/tests/acceptance/shared/util" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +type NodeAssertFunc func(g Gomega, node util.Node) + +// NodeAssertVersionTypeUpgraded custom assertion func that asserts that node version is as expected +func NodeAssertVersionTypeUpgraded(installType *customflag.InstallTypeValue) NodeAssertFunc { + if installType.Version != "" { + fmt.Printf("Asserting Version: %s\n", installType.Version) + return func(g Gomega, node util.Node) { + g.Expect(node.Version).Should(Equal(installType.Version), + "Nodes should all be upgraded to the specified version", node.Name) + } + } + + if installType.Commit != "" { + version := util.GetRke2Version() + fmt.Printf("Asserting Commit: %s\n Version: %s", installType.Commit, version) + return func(g Gomega, node util.Node) { + g.Expect(version).Should(ContainSubstring(installType.Commit), + "Nodes should all be upgraded to the specified commit", node.Name) + } + } + + return func(g Gomega, node util.Node) { + GinkgoT().Errorf("no version or commit specified for upgrade assertion") + } +} + +// NodeAssertVersionUpgraded custom assertion func that asserts that node version is as expected +func NodeAssertVersionUpgraded() NodeAssertFunc { + return func(g Gomega, node util.Node) { + g.Expect(&customflag.UpgradeVersionSUC).Should(ContainSubstring(node.Version), + "Nodes should all be upgraded to the specified version", node.Name) + } +} + +// NodeAssertReadyStatus custom assertion func that asserts that the node is in Ready state. +func NodeAssertReadyStatus() NodeAssertFunc { + return func(g Gomega, node util.Node) { + g.Expect(node.Status).Should(Equal("Ready"), + "Nodes should all be in Ready state") + } +} + +// CheckComponentCmdNode runs a command on a node and asserts that the value received +// contains the specified substring. 
+func CheckComponentCmdNode(cmd, assert, ip string) { + Eventually(func(g Gomega) { + fmt.Printf("\nExecuting cmd: %s\n", cmd) + res, err := util.RunCommandOnNode(cmd, ip) + if err != nil { + return + } + + g.Expect(res).Should(ContainSubstring(assert)) + fmt.Printf("\nResult: %s \n", res) + fmt.Printf("Matches with assert: %s \n", assert) + }, "420s", "3s").Should(Succeed()) +} + +// NodeAssertCount custom assertion func that asserts that node count is as expected +func NodeAssertCount() NodeAssertFunc { + return func(g Gomega, node util.Node) { + expectedNodeCount := util.NumServers + util.NumAgents + nodes, err := util.Nodes(false) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(len(nodes)).To(Equal(expectedNodeCount), + "Number of nodes should match the spec") + } +} diff --git a/tests/acceptance/core/service/assert/pod.go b/tests/acceptance/core/service/assert/pod.go new file mode 100644 index 00000000000..6aebf5ca3fa --- /dev/null +++ b/tests/acceptance/core/service/assert/pod.go @@ -0,0 +1,82 @@ +package assert + +import ( + "fmt" + "strings" + + "github.com/rancher/rke2/tests/acceptance/shared/util" + + . "github.com/onsi/gomega" + "github.com/onsi/gomega/types" +) + +type PodAssertFunc func(g Gomega, pod util.Pod) + +// PodAssertRestart custom assertion func that asserts that pods are not restarting with no reason +// controller, scheduler, helm-install pods can be restarted occasionally when cluster started if only once +func PodAssertRestart() PodAssertFunc { + return func(g Gomega, pod util.Pod) { + if strings.Contains(pod.NameSpace, "kube-system") && + strings.Contains(pod.Name, "controller") && + strings.Contains(pod.Name, "scheduler") { + g.Expect(pod.Restarts).Should(SatisfyAny(Equal("0"), + Equal("1")), + "could be restarted occasionally when cluster started", pod.Name) + } + } +} + +// PodAssertReady custom assertion func that asserts that the pod is +// with correct numbers of ready containers. 
+func PodAssertReady() PodAssertFunc { + return func(g Gomega, pod util.Pod) { + g.ExpectWithOffset(1, pod.Ready).To(checkReadyFields(), + "should have equal values in n/n format") + } +} + +// checkReadyFields is a custom matcher that checks +// if the input string is in N/N format and the same quantity. +func checkReadyFields() types.GomegaMatcher { + return WithTransform(func(s string) (bool, error) { + var a, b int + + n, err := fmt.Sscanf(s, "%d/%d", &a, &b) + if err != nil || n != 2 { + return false, fmt.Errorf("failed to parse format: %v", err) + } + + return a == b, nil + }, BeTrue()) +} + +// PodAssertStatus custom assertion that asserts that pod status is completed or in some cases +// apply pods can have an error status +func PodAssertStatus() PodAssertFunc { + return func(g Gomega, pod util.Pod) { + if strings.Contains(pod.Name, "helm-install") { + g.Expect(pod.Status).Should(Equal(util.CompletedAssert), pod.Name) + } else if strings.Contains(pod.Name, "apply") && + strings.Contains(pod.NameSpace, "system-upgrade") { + g.Expect(pod.Status).Should(SatisfyAny( + ContainSubstring("Error"), + Equal(util.CompletedAssert), + ), pod.Name) + } else { + g.Expect(pod.Status).Should(Equal(util.RunningAssert), pod.Name) + } + } +} + +// CheckPodStatusRunning asserts that the pod is running with the specified label = app name. 
+func CheckPodStatusRunning(name, namespace, assert string) { + cmd := "kubectl get pods -n " + namespace + " -o=name -l k8s-app=" + name + + " --field-selector=status.phase=Running --kubeconfig=" + util.KubeConfigFile + Eventually(func(g Gomega) { + res, err := util.RunCommandHost(cmd) + if err != nil { + return + } + g.Expect(res).Should(ContainSubstring(assert)) + }, "180s", "5s").Should(Succeed()) +} diff --git a/tests/acceptance/core/service/assert/validate.go b/tests/acceptance/core/service/assert/validate.go new file mode 100644 index 00000000000..7e349a43c24 --- /dev/null +++ b/tests/acceptance/core/service/assert/validate.go @@ -0,0 +1,94 @@ +package assert + +import ( + "fmt" + "strings" + "time" + + "github.com/rancher/rke2/tests/acceptance/shared/util" +) + +// validate calls runAssertion for each cmd/assert pair +// +// the first caller - process tests will spawn a go routine per ip the cluster +// +// need to send KubeconfigFile +func validate(exec func(string) (string, error), args ...string) error { + if len(args) < 2 || len(args)%2 != 0 { + return fmt.Errorf("must receive an even number of arguments as cmd/assert pairs") + } + + errorsChan := make(chan error, len(args)/2) + timeout := time.After(220 * time.Second) + ticker := time.NewTicker(3 * time.Second) + + for i := 0; i < len(args); i++ { + cmd := args[i] + if i+1 < len(args) { + assert := args[i+1] + i++ + + err := runAssertion(cmd, assert, exec, ticker.C, timeout, errorsChan) + if err != nil { + close(errorsChan) + return err + } + } + } + close(errorsChan) + + return nil +} + +// runAssertion runs a command and asserts that the value received against his respective command +func runAssertion( + cmd, assert string, + exec func(string) (string, error), + ticker <-chan time.Time, + timeout <-chan time.Time, + errorsChan chan<- error, +) error { + for { + select { + case <-timeout: + timeoutErr := fmt.Errorf("timeout reached for command:\n%s\n "+ + "Trying to assert with:\n %s", + cmd, assert) 
+ errorsChan <- timeoutErr + return timeoutErr + + case <-ticker: + res, err := exec(cmd) + if err != nil { + errorsChan <- err + return fmt.Errorf("from runCmd:\n %s\n %s", res, err) + } + + fmt.Printf("\nCMD: %s\nRESULT: %s\nAssertion: %s\n", cmd, res, assert) + if strings.Contains(res, assert) { + fmt.Printf("Matched with: \n%s\n", res) + errorsChan <- nil + return nil + } + } + } +} + +// ValidateOnHost runs an exec function on RunCommandHost and assert given is fulfilled. +// The last argument should be the assertion. +// Need to send kubeconfig file. +func ValidateOnHost(args ...string) error { + exec := func(cmd string) (string, error) { + return util.RunCommandHost(cmd) + } + return validate(exec, args...) +} + +// ValidateOnNode runs an exec function on RunCommandHost and assert given is fulfilled. +// The last argument should be the assertion. +func ValidateOnNode(ip string, args ...string) error { + exec := func(cmd string) (string, error) { + return util.RunCommandOnNode(cmd, ip) + } + return validate(exec, args...) 
+} diff --git a/tests/acceptance/core/service/constants.go b/tests/acceptance/core/service/constants.go new file mode 100644 index 00000000000..56842b31d52 --- /dev/null +++ b/tests/acceptance/core/service/constants.go @@ -0,0 +1,11 @@ +package service + +var ( + ExpectedValueUpgradedHost string + ExpectedValueUpgradedNode string + CmdHost string + ExpectedValueHost string + CmdNode string + ExpectedValueNode string + Description string +) diff --git a/tests/acceptance/core/service/customflag/model.go b/tests/acceptance/core/service/customflag/model.go new file mode 100644 index 00000000000..7218517f6b4 --- /dev/null +++ b/tests/acceptance/core/service/customflag/model.go @@ -0,0 +1,115 @@ +package customflag + +import ( + "fmt" + "strconv" + "strings" +) + +var ( + UpgradeVersionSUC UpgradeVersion + InstallType InstallTypeValue + InstallUpgradeFlag MultiValueFlag + TestCase TestConfigFlag +) + +// UpgradeVersion is a custom type to use upgradeVersionSUC flag +type UpgradeVersion struct { + Version string +} + +// InstallTypeValue is a customflag type that can be used to parse the installation type +type InstallTypeValue struct { + Version string + Commit string +} + +// TestConfigFlag is a customflag type that can be used to parse the test case argument +type TestConfigFlag struct { + TestFuncName string + TestFunc TestCaseFlagType + DeployWorkload bool +} + +// TestCaseFlagType is a customflag type that can be used to parse the test case argument +type TestCaseFlagType func(deployWorkload bool) + +// MultiValueFlag is a customflag type that can be used to parse multiple values +type MultiValueFlag []string + +// String returns the string representation of the MultiValueFlag +func (m *MultiValueFlag) String() string { + return strings.Join(*m, ",") +} + +// Set func sets multiValueFlag appending the value +func (m *MultiValueFlag) Set(value string) error { + *m = append(*m, value) + return nil +} + +// String returns the string representation of the TestConfigFlag 
+func (t *TestConfigFlag) String() string { + return fmt.Sprintf("TestFuncName: %s, DeployWorkload: %t", t.TestFuncName, t.DeployWorkload) +} + +// Set parses the customflag value for TestConfigFlag +func (t *TestConfigFlag) Set(value string) error { + parts := strings.Split(value, ",") + + if len(parts) < 1 { + return fmt.Errorf("invalid test case customflag format") + } + + t.TestFuncName = parts[0] + if len(parts) > 1 { + deployWorkload, err := strconv.ParseBool(parts[1]) + if err != nil { + return fmt.Errorf("invalid deploy workload customflag: %v", err) + } + t.DeployWorkload = deployWorkload + } + + return nil +} + +// String returns the string representation of the InstallTypeValue +func (i *InstallTypeValue) String() string { + return fmt.Sprintf("Version: %s, Commit: %s", i.Version, i.Commit) +} + +// Set parses the input string and sets the Version or Commit field using Set customflag interface +func (i *InstallTypeValue) Set(value string) error { + parts := strings.Split(value, "=") + + if len(parts) == 2 { + switch parts[0] { + case "INSTALL_RKE2_VERSION": + i.Version = parts[1] + case "INSTALL_RKE2_COMMIT": + i.Commit = parts[1] + default: + return fmt.Errorf("invalid install type: %s", parts[0]) + } + } else { + return fmt.Errorf("invalid input format") + } + + return nil +} + +// String returns the string representation of the UpgradeVersion for SUC upgrade +func (t *UpgradeVersion) String() string { + return t.Version +} + +// Set parses the input string and sets the Version field for SUC upgrades +func (t *UpgradeVersion) Set(value string) error { + if strings.HasPrefix(value, "v") && strings.HasSuffix(value, "rke2r1") { + t.Version = value + } else { + return fmt.Errorf("invalid install format: %s", value) + } + + return nil +} diff --git a/tests/acceptance/core/service/factory/cluster.go b/tests/acceptance/core/service/factory/cluster.go new file mode 100644 index 00000000000..a26d7b5b41a --- /dev/null +++ 
b/tests/acceptance/core/service/factory/cluster.go @@ -0,0 +1,88 @@ +package factory + +import ( + "fmt" + "path/filepath" + "strconv" + + "github.com/gruntwork-io/terratest/modules/terraform" + "github.com/rancher/rke2/tests/acceptance/shared/util" + + . "github.com/onsi/ginkgo/v2" +) + +func BuildCluster(g GinkgoTInterface, destroy bool) (string, error) { + tfDir, err := filepath.Abs(util.BasePath() + util.ModulesPath) + if err != nil { + return "", err + } + + varDir, err := filepath.Abs(util.BasePath() + util.TfVarsPath) + if err != nil { + return "", err + } + + terraformOptions := terraform.Options{ + TerraformDir: tfDir, + VarFiles: []string{varDir}, + } + + util.NumServers, err = strconv.Atoi(terraform.GetVariableAsStringFromVarFile(g, varDir, + "no_of_server_nodes")) + if err != nil { + return "", err + } + util.NumAgents, err = strconv.Atoi(terraform.GetVariableAsStringFromVarFile(g, varDir, + "no_of_worker_nodes")) + if err != nil { + return "", err + } + + splitRoles := terraform.GetVariableAsStringFromVarFile(g, varDir, "split_roles") + if splitRoles == "true" { + etcdNodes, err := strconv.Atoi(terraform.GetVariableAsStringFromVarFile(g, varDir, + "etcd_only_nodes")) + if err != nil { + return "", err + } + etcdCpNodes, err := strconv.Atoi(terraform.GetVariableAsStringFromVarFile(g, varDir, + "etcd_cp_nodes")) + if err != nil { + return "", err + } + etcdWorkerNodes, err := strconv.Atoi(terraform.GetVariableAsStringFromVarFile(g, varDir, + "etcd_worker_nodes")) + if err != nil { + return "", err + } + cpNodes, err := strconv.Atoi(terraform.GetVariableAsStringFromVarFile(g, varDir, + "cp_only_nodes")) + if err != nil { + return "", err + } + cpWorkerNodes, err := strconv.Atoi(terraform.GetVariableAsStringFromVarFile(g, varDir, + "cp_worker_nodes")) + if err != nil { + return "", err + } + util.NumServers = util.NumServers + etcdNodes + etcdCpNodes + etcdWorkerNodes + + +cpNodes + cpWorkerNodes + } + + util.AwsUser = 
terraform.GetVariableAsStringFromVarFile(g, varDir, "aws_user") + util.AccessKey = terraform.GetVariableAsStringFromVarFile(g, varDir, "access_key") + fmt.Printf("\nCreating Cluster") + + if destroy { + fmt.Printf("Cluster is being deleted") + terraform.Destroy(g, &terraformOptions) + return "cluster destroyed", err + } + + terraform.InitAndApply(g, &terraformOptions) + util.KubeConfigFile = terraform.Output(g, &terraformOptions, "kubeconfig") + util.ServerIPs = terraform.Output(g, &terraformOptions, "master_ips") + util.AgentIPs = terraform.Output(g, &terraformOptions, "worker_ips") + + return "cluster created", nil +} diff --git a/tests/acceptance/core/service/template/helper.go b/tests/acceptance/core/service/template/helper.go new file mode 100644 index 00000000000..eea7f75e99b --- /dev/null +++ b/tests/acceptance/core/service/template/helper.go @@ -0,0 +1,104 @@ +package template + +import ( + "fmt" + "strings" + "sync" + + "github.com/rancher/rke2/tests/acceptance/core/testcase" + "github.com/rancher/rke2/tests/acceptance/shared/util" + + . 
"github.com/onsi/ginkgo/v2" +) + +// upgradeVersion upgrades the version of RKE2 and updates the expected values +func upgradeVersion(template VersionTestTemplate, version string) error { + err := testcase.TestUpgradeClusterManually(version) + if err != nil { + return err + } + + for i := range template.TestCombination.RunOnNode { + template.TestCombination.RunOnNode[i].ExpectedValue = + template.TestCombination.RunOnNode[i].ExpectedValueUpgrade + } + + for i := range template.TestCombination.RunOnHost { + template.TestCombination.RunOnHost[i].ExpectedValue = + template.TestCombination.RunOnHost[i].ExpectedValueUpgrade + } + + return nil +} + +// checkVersion checks the version of RKE2 by calling processTestCombination +func checkVersion(v VersionTestTemplate) error { + ips, err := getIPs() + if err != nil { + GinkgoT().Errorf("Failed to get IPs: %s", err) + } + + var wg sync.WaitGroup + errorChanList := make( + chan error, + len(ips)*(len(v.TestCombination.RunOnHost)+len(v.TestCombination.RunOnNode)), + ) + + processTestCombination(errorChanList, ips, *v.TestCombination) + + wg.Wait() + close(errorChanList) + + for chanErr := range errorChanList { + if chanErr != nil { + return chanErr + } + } + + if v.TestConfig != nil { + TestCaseWrapper(v) + } + + return nil +} + +// joinCommands joins the first command with some arg +func joinCommands(cmd, kubeconfigFlag string) string { + cmds := strings.Split(cmd, ",") + joinedCmd := cmds[0] + kubeconfigFlag + + if len(cmds) > 1 { + secondCmd := strings.Join(cmds[1:], ",") + joinedCmd += " " + secondCmd + } + + return joinedCmd +} + +// getIPs gets the IPs of the nodes +func getIPs() (ips []string, err error) { + ips = util.FetchNodeExternalIP() + return ips, nil +} + +// GetTestCase returns the test case based on the name to be used as customflag. 
+func GetTestCase(name string) (TestCase, error) { + if name == "" { + return func(deployWorkload bool) {}, nil + } + + testCase := map[string]TestCase{ + "TestDaemonset": testcase.TestDaemonset, + "TestIngress": testcase.TestIngress, + "TestDnsAccess": testcase.TestDnsAccess, + "TestServiceClusterIp": testcase.TestServiceClusterIp, + "TestServiceNodePort": testcase.TestServiceNodePort, + "TestCoredns": testcase.TestCoredns, + } + + if test, ok := testCase[name]; ok { + return test, nil + } + + return nil, fmt.Errorf("invalid test case name") +} diff --git a/tests/acceptance/core/service/template/model.go b/tests/acceptance/core/service/template/model.go new file mode 100644 index 00000000000..76ccefecfd0 --- /dev/null +++ b/tests/acceptance/core/service/template/model.go @@ -0,0 +1,36 @@ +package template + +// VersionTestTemplate represents a version test scenario with test configurations and commands. +type VersionTestTemplate struct { + Description string + TestCombination *RunCmd + InstallUpgrade []string + TestConfig *TestConfig +} + +// RunCmd represents the command sets to run on host and node. +type RunCmd struct { + RunOnHost []TestMap + RunOnNode []TestMap +} + +// TestMap represents a single test command with key:value pairs. +type TestMap struct { + Cmd string + ExpectedValue string + ExpectedValueUpgrade string +} + +// TestConfig represents the testcase function configuration +type TestConfig struct { + TestFunc TestCase + DeployWorkload bool +} + +// TestCase is a custom type representing the test function. +type TestCase func(deployWorkload bool) + +// TestCaseWrapper wraps a test function and calls it with the given VersionTestTemplate. 
+func TestCaseWrapper(v VersionTestTemplate) { + v.TestConfig.TestFunc(v.TestConfig.DeployWorkload) +} diff --git a/tests/acceptance/core/service/template/processor.go b/tests/acceptance/core/service/template/processor.go new file mode 100644 index 00000000000..9b67e0e2811 --- /dev/null +++ b/tests/acceptance/core/service/template/processor.go @@ -0,0 +1,99 @@ +package template + +import ( + "fmt" + "sync" + + "github.com/rancher/rke2/tests/acceptance/core/service/assert" + "github.com/rancher/rke2/tests/acceptance/shared/util" + + . "github.com/onsi/ginkgo/v2" +) + +// processTestCombination runs the tests per ips using CmdOnNode and CmdOnHost validation +// it will spawn a go routine per ip +func processTestCombination(resultChan chan error, ips []string, testCombination RunCmd) { + var wg sync.WaitGroup + + for _, ip := range ips { + if testCombination.RunOnHost != nil { + for _, test := range testCombination.RunOnHost { + wg.Add(1) + go func(ip string, cmd, expectedValue, expectedValueUpgraded string) { + defer wg.Done() + defer GinkgoRecover() + processOnHost(resultChan, ip, cmd, expectedValue) + }(ip, test.Cmd, test.ExpectedValue, test.ExpectedValueUpgrade) + } + } + + if testCombination.RunOnNode != nil { + for _, test := range testCombination.RunOnNode { + wg.Add(1) + go func(ip string, cmd, expectedValue string) { + defer wg.Done() + defer GinkgoRecover() + processOnNode(resultChan, ip, cmd, expectedValue) + }(ip, test.Cmd, test.ExpectedValue) + } + } + } +} + +// processOnNode runs the test on the node calling ValidateOnNode +func processOnNode(resultChan chan error, ip, cmd, expectedValue string) { + if expectedValue == "" { + err := fmt.Errorf("\nexpected value should be sent to node") + fmt.Println("error:", err) + resultChan <- err + close(resultChan) + return + } + + version := util.GetRke2Version() + fmt.Printf("\n Checking version: %s on ip: %s \n "+ + "Command: %s, \n Expected Value: %s", version, ip, cmd, expectedValue) + + joinedCmd := 
joinCommands(cmd, "") + + err := assert.ValidateOnNode( + ip, + joinedCmd, + expectedValue, + ) + if err != nil { + fmt.Println("error:", err) + resultChan <- err + close(resultChan) + return + } +} + +// processOnHost runs the test on the host calling ValidateOnHost +func processOnHost(resultChan chan error, ip, cmd, expectedValue string) { + if expectedValue == "" { + err := fmt.Errorf("\nexpected value should be sent to host") + fmt.Println("error:", err) + resultChan <- err + close(resultChan) + return + } + + kubeconfigFlag := " --kubeconfig=" + util.KubeConfigFile + fullCmd := joinCommands(cmd, kubeconfigFlag) + + version := util.GetRke2Version() + fmt.Printf("\n Checking version: %s on ip: %s \n "+ + "Command: %s, \n Expected Value: %s", version, ip, fullCmd, expectedValue) + + err := assert.ValidateOnHost( + fullCmd, + expectedValue, + ) + if err != nil { + fmt.Println("error:", err) + resultChan <- err + close(resultChan) + return + } +} diff --git a/tests/acceptance/core/service/template/versiontemplate.go b/tests/acceptance/core/service/template/versiontemplate.go new file mode 100644 index 00000000000..7f96cc63278 --- /dev/null +++ b/tests/acceptance/core/service/template/versiontemplate.go @@ -0,0 +1,39 @@ +package template + +import ( + "fmt" + + . 
"github.com/onsi/ginkgo/v2" +) + +// VersionTemplate is a template for testing RKE2 versions + test cases and upgrading cluster if needed +func VersionTemplate(test VersionTestTemplate) { + err := checkVersion(test) + if err != nil { + GinkgoT().Errorf(err.Error()) + return + } + + for _, version := range test.InstallUpgrade { + if GinkgoT().Failed() { + fmt.Println("checkVersion failed, upgrade not performed") + return + } + + err = upgradeVersion(test, version) + if err != nil { + GinkgoT().Errorf("error upgrading: %v\n", err) + return + } + + err = checkVersion(test) + if err != nil { + GinkgoT().Errorf(err.Error()) + return + } + + if test.TestConfig != nil { + TestCaseWrapper(test) + } + } +} diff --git a/tests/acceptance/core/testcase/cluster.go b/tests/acceptance/core/testcase/cluster.go new file mode 100644 index 00000000000..8d0453926a4 --- /dev/null +++ b/tests/acceptance/core/testcase/cluster.go @@ -0,0 +1,33 @@ +package testcase + +import ( + "fmt" + + "github.com/rancher/rke2/tests/acceptance/core/service/factory" + "github.com/rancher/rke2/tests/acceptance/shared/util" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +// TestBuildCluster test the creation of a cluster using terraform +func TestBuildCluster(g GinkgoTInterface, destroy bool) { + status, err := factory.BuildCluster(g, destroy) + if err != nil { + return + } + Expect(status).To(Equal("cluster created")) + + util.PrintFileContents(util.KubeConfigFile) + Expect(util.KubeConfigFile).ShouldNot(BeEmpty()) + Expect(util.ServerIPs).ShouldNot(BeEmpty()) + + fmt.Println("Server Node IPS:", util.ServerIPs) + fmt.Println("Agent Node IPS:", util.AgentIPs) + + if util.NumAgents > 0 { + Expect(util.AgentIPs).ShouldNot(BeEmpty()) + } else { + Expect(util.AgentIPs).Should(BeEmpty()) + } +} diff --git a/tests/acceptance/core/testcase/coredns.go b/tests/acceptance/core/testcase/coredns.go new file mode 100644 index 00000000000..cdbfd883869 --- /dev/null +++ b/tests/acceptance/core/testcase/coredns.go @@ -0,0 +1,34 @@ +package testcase + +import ( + "github.com/rancher/rke2/tests/acceptance/core/service/assert" + "github.com/rancher/rke2/tests/acceptance/shared/util" + + . 
"github.com/onsi/gomega" +) + +func TestCoredns(deployWorkload bool) { + if deployWorkload { + _, err := util.ManageWorkload("create", "dnsutils.yaml") + Expect(err).NotTo(HaveOccurred(), + "dnsutils manifest not deployed", err) + } + _, err := util.AddHelmRepo("traefik", "https://helm.traefik.io/traefik") + if err != nil { + return + } + + err = assert.ValidateOnHost(util.ExecDnsUtils+util.KubeConfigFile+ + " -- nslookup kubernetes.default", util.Nslookup) + if err != nil { + return + } + + ips := util.FetchNodeExternalIP() + for _, ip := range ips { + err = assert.ValidateOnHost(ip, util.HelmListCoreDns, "rke2-coredns-1.19.402") + if err != nil { + return + } + } +} diff --git a/tests/acceptance/core/testcase/daemonset.go b/tests/acceptance/core/testcase/daemonset.go new file mode 100644 index 00000000000..1d6d776acd3 --- /dev/null +++ b/tests/acceptance/core/testcase/daemonset.go @@ -0,0 +1,23 @@ +package testcase + +import ( + "github.com/rancher/rke2/tests/acceptance/shared/util" + + . "github.com/onsi/gomega" +) + +func TestDaemonset(deployWorkload bool) { + if deployWorkload { + _, err := util.ManageWorkload("create", "daemonset.yaml") + Expect(err).NotTo(HaveOccurred(), + "Daemonset manifest not deployed") + } + nodes, _ := util.WorkerNodes(false) + pods, _ := util.Pods(false) + + Eventually(func(g Gomega) { + count := util.CountOfStringInSlice(util.TestDaemonset, pods) + g.Expect(count).Should(Equal(len(nodes)), + "Daemonset pod count does not match node count") + }, "420s", "5s").Should(Succeed()) +} diff --git a/tests/acceptance/core/testcase/ingressdns.go b/tests/acceptance/core/testcase/ingressdns.go new file mode 100644 index 00000000000..185bcb6520f --- /dev/null +++ b/tests/acceptance/core/testcase/ingressdns.go @@ -0,0 +1,61 @@ +package testcase + +import ( + "github.com/rancher/rke2/tests/acceptance/core/service/assert" + "github.com/rancher/rke2/tests/acceptance/shared/util" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestIngress(deployWorkload bool) { + var ingressIps []string + if deployWorkload { + _, err := util.ManageWorkload("create", "ingress.yaml") + Expect(err).NotTo(HaveOccurred(), "Ingress manifest not deployed") + } + + err := assert.ValidateOnHost(util.GetIngressRunning+util.KubeConfigFile, util.RunningAssert) + if err != nil { + GinkgoT().Errorf("Error: %v", err) + } + + nodes, err := util.Nodes(false) + Expect(err).NotTo(HaveOccurred()) + + Eventually(func(Gomega) bool { + ingressIps, err = util.FetchIngressIP(util.IngressNamespace) + if err != nil { + return false + } + if len(ingressIps) != len(nodes) { + return false + } + return true + }, "400s", "3s").Should(BeTrue()) + + for _, ip := range ingressIps { + if assert.CheckComponentCmdHost("curl -s --header host:foo1.bar.com"+" "+ + "http://"+ip+"/name.html", util.TestIngress); err != nil { + return + } + } +} + +func TestDnsAccess(deployWorkload bool) { + if deployWorkload { + _, err := util.ManageWorkload("create", "dnsutils.yaml") + Expect(err).NotTo(HaveOccurred(), + "dnsutils manifest not deployed", err) + } + + err := assert.ValidateOnHost(util.GetDnsUtils+util.KubeConfigFile, util.RunningAssert) + if err != nil { + GinkgoT().Errorf("Error: %v", err) + } + + assert.CheckComponentCmdHost( + util.ExecDnsUtils+util.KubeConfigFile+" -- nslookup kubernetes.default", + util.Nslookup, + ) +} diff --git a/tests/acceptance/core/testcase/node.go b/tests/acceptance/core/testcase/node.go new file mode 100644 index 00000000000..6f0de39fe5a --- /dev/null +++ b/tests/acceptance/core/testcase/node.go @@ -0,0 +1,35 @@ +package testcase + +import ( + "fmt" + + "github.com/rancher/rke2/tests/acceptance/core/service/assert" + "github.com/rancher/rke2/tests/acceptance/shared/util" + + . 
"github.com/onsi/gomega" +) + +// TestNodeStatus test the status of the nodes in the cluster using 2 custom assert functions +func TestNodeStatus( + nodeAssertReadyStatus assert.NodeAssertFunc, + nodeAssertVersion assert.NodeAssertFunc, +) { + fmt.Printf("\nFetching node status\n") + + expectedNodeCount := util.NumServers + util.NumAgents + Eventually(func(g Gomega) { + nodes, err := util.Nodes(false) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(len(nodes)).To(Equal(expectedNodeCount), + "Number of nodes should match the spec") + + for _, node := range nodes { + if nodeAssertReadyStatus != nil { + nodeAssertReadyStatus(g, node) + } + if nodeAssertVersion != nil { + nodeAssertVersion(g, node) + } + } + }, "800s", "3s").Should(Succeed()) +} diff --git a/tests/acceptance/core/testcase/pod.go b/tests/acceptance/core/testcase/pod.go new file mode 100644 index 00000000000..16ae491b242 --- /dev/null +++ b/tests/acceptance/core/testcase/pod.go @@ -0,0 +1,48 @@ +package testcase + +import ( + "fmt" + "strings" + + "github.com/rancher/rke2/tests/acceptance/core/service/assert" + "github.com/rancher/rke2/tests/acceptance/shared/util" + + . 
"github.com/onsi/gomega" +) + +// TestPodStatus test the status of the pods in the cluster using 2 custom assert functions +func TestPodStatus( + podAssertRestarts assert.PodAssertFunc, + podAssertReady assert.PodAssertFunc, + podAssertStatus assert.PodAssertFunc, +) { + fmt.Printf("\nFetching pod status\n") + + Eventually(func(g Gomega) { + pods, err := util.Pods(false) + g.Expect(err).NotTo(HaveOccurred()) + + for _, pod := range pods { + if strings.Contains(pod.Name, "helm-install") { + g.Expect(pod.Status).Should(Equal(util.CompletedAssert), pod.Name) + } else if strings.Contains(pod.Name, "apply") && + strings.Contains(pod.NameSpace, "system-upgrade") { + g.Expect(pod.Status).Should(SatisfyAny( + ContainSubstring("Error"), + Equal(util.CompletedAssert), + ), pod.Name) + } else { + g.Expect(pod.Status).Should(Equal(util.RunningAssert), pod.Name) + if podAssertRestarts != nil { + podAssertRestarts(g, pod) + } + if podAssertReady != nil { + podAssertReady(g, pod) + } + if podAssertStatus != nil { + podAssertStatus(g, pod) + } + } + } + }, "600s", "3s").Should(Succeed()) +} diff --git a/tests/acceptance/core/testcase/service.go b/tests/acceptance/core/testcase/service.go new file mode 100644 index 00000000000..8e5309fc281 --- /dev/null +++ b/tests/acceptance/core/testcase/service.go @@ -0,0 +1,56 @@ +package testcase + +import ( + "github.com/rancher/rke2/tests/acceptance/core/service/assert" + "github.com/rancher/rke2/tests/acceptance/shared/util" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestServiceClusterIp(deployWorkload bool) { + if deployWorkload { + _, err := util.ManageWorkload("create", "clusterip.yaml") + Expect(err).NotTo(HaveOccurred(), "Cluster IP manifest not deployed") + } + + err := assert.ValidateOnHost(util.GetClusterIp+util.KubeConfigFile, util.RunningAssert) + if err != nil { + GinkgoT().Errorf("Error: %v", err) + } + + clusterip, port, _ := util.FetchClusterIP(util.ClusterIpNamespace, + util.NginxClusterIpSVC) + nodeExternalIP := util.FetchNodeExternalIP() + for _, ip := range nodeExternalIP { + err = assert.ValidateOnNode(ip, "curl -sL --insecure http://"+clusterip+ + ":"+port+"/name.html", util.TestClusterip) + if err != nil { + GinkgoT().Errorf("Error: %v", err) + } + } +} + +func TestServiceNodePort(deployWorkload bool) { + if deployWorkload { + _, err := util.ManageWorkload("create", "nodeport.yaml") + Expect(err).NotTo(HaveOccurred(), "NodePort manifest not deployed") + } + + nodeExternalIP := util.FetchNodeExternalIP() + nodeport, err := util.RunCommandHost(util.GetNodePortSVC + util.KubeConfigFile) + if err != nil { + GinkgoT().Errorf("Error: %v", err) + } + + for _, ip := range nodeExternalIP { + assert.CheckPodStatusRunning(util.NginxAppNodePortSVC, + util.NodePortNamespace, util.TestNodePort) + + assert.CheckComponentCmdNode("curl -sL --insecure http://"+ip+":"+nodeport+"/name.html", + util.TestNodePort, ip) + if err != nil { + GinkgoT().Errorf("Error: %v", err) + } + } +} diff --git a/tests/acceptance/core/testcase/upgradecluster.go b/tests/acceptance/core/testcase/upgradecluster.go new file mode 100644 index 00000000000..56e4f60a7aa --- /dev/null +++ b/tests/acceptance/core/testcase/upgradecluster.go @@ -0,0 +1,165 @@ +package testcase + +import ( + "fmt" + "os" + "strings" + "sync" + "time" + + "github.com/rancher/rke2/tests/acceptance/core/service/assert" + "github.com/rancher/rke2/tests/acceptance/core/service/customflag" + "github.com/rancher/rke2/tests/acceptance/shared/util" 
+ + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +// TestUpgradeClusterSUC upgrades cluster using the system-upgrade-controller. +func TestUpgradeClusterSUC(version string) error { + fmt.Printf("\nUpgrading cluster to version: %s\n", version) + + _, err := util.ManageWorkload("create", "suc.yaml") + Expect(err).NotTo(HaveOccurred(), + "system-upgrade-controller manifest did not deploy successfully") + + assert.CheckComponentCmdHost( + util.GetPodsSystemUpgrade+util.KubeConfigFile, + util.SUC, + util.RunningAssert, + ) + Expect(err).NotTo(HaveOccurred()) + + originalFilePath := util.BasePath() + "/fixtures/workloads" + "/upgrade-plan.yaml" + newFilePath := util.BasePath() + "/fixtures/workloads" + "/plan.yaml" + + content, err := os.ReadFile(originalFilePath) + if err != nil { + return fmt.Errorf("failed to read file: %s", err) + } + + newContent := strings.ReplaceAll(string(content), "$UPGRADEVERSION", version) + err = os.WriteFile(newFilePath, []byte(newContent), 0644) + if err != nil { + return fmt.Errorf("failed to write file: %s", err) + } + + _, err = util.ManageWorkload("create", "plan.yaml") + Expect(err).NotTo(HaveOccurred(), "failed to upgrade cluster.") + + return nil +} + +// TestUpgradeClusterManually upgrades cluster "manually" +func TestUpgradeClusterManually(version string) error { + if version == "" { + return fmt.Errorf("please provide a non-empty rke2 version to upgrade to") + } + + serverIPs := strings.Split(util.ServerIPs, ",") + agentIPs := strings.Split(util.AgentIPs, ",") + + if util.NumServers == 0 && util.NumAgents == 0 { + return fmt.Errorf("no nodes found to upgrade") + } + + if util.NumServers > 0 { + if err := upgradeServer(version, serverIPs); err != nil { + return err + } + } + + if util.NumAgents > 0 { + if err := upgradeAgent(version, agentIPs); err != nil { + return err + } + } + + return nil +} + +// upgradeServer upgrades servers in cluster,it will spawn a go routine per server ip. 
+func upgradeServer(installType string, serverIPs []string) error {
+	var wg sync.WaitGroup
+	errCh := make(chan error, len(serverIPs))
+
+	for _, ip := range serverIPs {
+		switch {
+		case customflag.InstallType.Version != "":
+			installType = fmt.Sprintf("INSTALL_RKE2_VERSION=%s", customflag.InstallType.Version)
+		case customflag.InstallType.Commit != "":
+			installType = fmt.Sprintf("INSTALL_RKE2_COMMIT=%s", customflag.InstallType.Commit)
+		}
+
+		upgradeCommand := fmt.Sprintf(util.InstallRke2Server, installType)
+		wg.Add(1)
+		go func(ip, installFlagServer string) {
+			defer wg.Done()
+			defer GinkgoRecover()
+
+			fmt.Println("Upgrading server to: " + upgradeCommand)
+			if _, err := util.RunCommandOnNode(upgradeCommand, ip); err != nil {
+				fmt.Printf("\nError upgrading server %s: %v\n\n", ip, err)
+				errCh <- err
+				// do NOT close errCh here: a second failing goroutine would panic on send/close
+				return
+			}
+
+			fmt.Println("Restarting server: " + ip)
+			if _, err := util.RestartCluster(ip); err != nil {
+				fmt.Printf("\nError restarting server %s: %v\n\n", ip, err)
+				errCh <- err
+				// do NOT close errCh here: errCh is closed exactly once after wg.Wait()
+				return
+			}
+			time.Sleep(30 * time.Second)
+		}(ip, installType)
+	}
+	wg.Wait()
+	close(errCh)
+
+	return <-errCh // first error reported by any goroutine, or nil (closed empty channel)
+}
+
+// upgradeAgent upgrades agents in cluster, it will spawn a go routine per agent ip.
+func upgradeAgent(installType string, agentIPs []string) error {
+	var wg sync.WaitGroup
+	errCh := make(chan error, len(agentIPs))
+
+	for _, ip := range agentIPs {
+		switch {
+		case customflag.InstallType.Version != "":
+			installType = fmt.Sprintf("INSTALL_RKE2_VERSION=%s", customflag.InstallType.Version)
+		case customflag.InstallType.Commit != "":
+			installType = fmt.Sprintf("INSTALL_RKE2_COMMIT=%s", customflag.InstallType.Commit)
+		}
+
+		upgradeCommand := fmt.Sprintf(util.InstallRke2Agent, installType)
+		wg.Add(1)
+		go func(ip, installFlagAgent string) {
+			defer wg.Done()
+			defer GinkgoRecover()
+
+			fmt.Println("Upgrading agent to: " + upgradeCommand)
+			if _, err := util.RunCommandOnNode(upgradeCommand, ip); err != nil {
+				fmt.Printf("\nError upgrading agent %s: %v\n\n", ip, err)
+				errCh <- err
+				// do NOT close errCh here: a second failing goroutine would panic on send/close
+				return
+			}
+
+			fmt.Println("Restarting agent: " + ip)
+			if _, err := util.RestartCluster(ip); err != nil {
+				fmt.Printf("\nError restarting agent %s: %v\n\n", ip, err)
+				errCh <- err
+				// do NOT close errCh here: errCh is closed exactly once after wg.Wait()
+				return
+
+			}
+		}(ip, installType)
+	}
+	wg.Wait()
+	close(errCh)
+
+	return <-errCh // first error reported by any goroutine, or nil (closed empty channel)
+}
diff --git a/tests/acceptance/entrypoint/createcluster/createcluster_suite_test.go b/tests/acceptance/entrypoint/createcluster/createcluster_suite_test.go
new file mode 100644
index 00000000000..263f5a8ec65
--- /dev/null
+++ b/tests/acceptance/entrypoint/createcluster/createcluster_suite_test.go
@@ -0,0 +1,28 @@
+package createcluster
+
+import (
+	"flag"
+	"testing"
+
+	"github.com/rancher/rke2/tests/acceptance/core/service/factory"
+	"github.com/rancher/rke2/tests/acceptance/shared/util"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+)
+
+func TestClusterCreateSuite(t *testing.T) {
+	RegisterFailHandler(Fail)
+	flag.Parse()
+
+	RunSpecs(t, "Create Cluster Test Suite")
+}
+
+var _ = AfterSuite(func() {
+	g := GinkgoT()
+	if *util.Destroy {
+		status, err := factory.BuildCluster(g, *util.Destroy)
+		Expect(err).NotTo(HaveOccurred())
+		Expect(status).To(Equal("cluster destroyed"))
+	}
+})
diff --git a/tests/acceptance/entrypoint/createcluster/createcluster_test.go b/tests/acceptance/entrypoint/createcluster/createcluster_test.go
new file mode 100644
index 00000000000..2137038f944
--- /dev/null
+++ b/tests/acceptance/entrypoint/createcluster/createcluster_test.go
@@ -0,0 +1,75 @@
+package createcluster
+
+import (
+	"fmt"
+
+	"github.com/rancher/rke2/tests/acceptance/core/service/assert"
+	"github.com/rancher/rke2/tests/acceptance/core/testcase"
+	"github.com/rancher/rke2/tests/acceptance/shared/util"
+
+	. "github.com/onsi/ginkgo/v2"
+)
+
+var _ = Describe("Test:", func() {
+
+	Context("Build Cluster:", func() {
+
+		It("Start Up with no issues", func() {
+			testcase.TestBuildCluster(GinkgoT(), false)
+		})
+
+		It("Checks Node Status", func() {
+			testcase.TestNodeStatus(
+				assert.NodeAssertReadyStatus(),
+				nil,
+			)
+		})
+
+		It("Checks Pod Status", func() {
+			testcase.TestPodStatus(
+				assert.PodAssertRestart(),
+				assert.PodAssertReady(),
+				assert.PodAssertStatus(),
+			)
+		})
+
+		It("Verifies ClusterIP Service", func() {
+			testcase.TestServiceClusterIp(true)
+			defer util.ManageWorkload("delete", "clusterip.yaml")
+		})
+
+		It("Verifies NodePort Service", func() {
+			testcase.TestServiceNodePort(true)
+			defer util.ManageWorkload("delete", "nodeport.yaml")
+		})
+
+		It("Verifies Ingress", func() {
+			testcase.TestIngress(true)
+			defer util.ManageWorkload("delete", "ingress.yaml")
+		})
+
+		It("Verifies Daemonset", func() {
+			testcase.TestDaemonset(true)
+			defer util.ManageWorkload("delete", "daemonset.yaml")
+		})
+
+		It("Verifies dns access", func() {
+			testcase.TestDnsAccess(true)
+			defer util.ManageWorkload("delete", "dnsutils.yaml")
+		})
+	})
+})
+
+var _ = BeforeEach(func() {
+	if *util.Destroy {
+		Skip("Cluster is being Deleted")
+	}
+})
+
+var _ = AfterEach(func() {
+	if CurrentSpecReport().Failed() {
+		fmt.Printf("\nFAILED! %s\n", CurrentSpecReport().FullText())
+	} else {
+		fmt.Printf("\nPASSED! %s\n", CurrentSpecReport().FullText())
+	}
+})
diff --git a/tests/acceptance/entrypoint/upgradecluster/upgradecluster_suite_test.go b/tests/acceptance/entrypoint/upgradecluster/upgradecluster_suite_test.go
new file mode 100644
index 00000000000..eae7608289d
--- /dev/null
+++ b/tests/acceptance/entrypoint/upgradecluster/upgradecluster_suite_test.go
@@ -0,0 +1,39 @@
+package upgradecluster
+
+import (
+	"flag"
+	"os"
+	"testing"
+
+	"github.com/rancher/rke2/tests/acceptance/core/service/customflag"
+	"github.com/rancher/rke2/tests/acceptance/core/service/factory"
+	"github.com/rancher/rke2/tests/acceptance/shared/util"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+)
+
+func TestMain(m *testing.M) {
+	flag.Var(&customflag.InstallType, "installtype", "Upgrade to run with type=value,"+
+		"INSTALL_RKE2_VERSION=v1.26.2+rke2r1 or INSTALL_RKE2_COMMIT=1823dsad7129873192873129asd")
+	flag.Var(&customflag.UpgradeVersionSUC, "upgradeVersionSUC", "Upgrade SUC model")
+
+	flag.Parse()
+
+	os.Exit(m.Run())
+}
+
+func TestClusterUpgradeSuite(t *testing.T) {
+	RegisterFailHandler(Fail)
+
+	RunSpecs(t, "Upgrade Cluster Test Suite")
+}
+
+var _ = AfterSuite(func() {
+	g := GinkgoT()
+	if *util.Destroy {
+		status, err := factory.BuildCluster(g, *util.Destroy)
+		Expect(err).NotTo(HaveOccurred())
+		Expect(status).To(Equal("cluster destroyed"))
+	}
+})
diff --git a/tests/acceptance/entrypoint/upgradecluster/upgrademanual_test.go b/tests/acceptance/entrypoint/upgradecluster/upgrademanual_test.go
new file mode 100644
index 00000000000..af03992e83d
--- /dev/null
+++ b/tests/acceptance/entrypoint/upgradecluster/upgrademanual_test.go
@@ -0,0 +1,117 @@
+//go:build upgrademanual + +package upgradecluster + +import ( + "fmt" + + "github.com/rancher/rke2/tests/acceptance/core/service/assert" + "github.com/rancher/rke2/tests/acceptance/core/service/customflag" + "github.com/rancher/rke2/tests/acceptance/core/testcase" + "github.com/rancher/rke2/tests/acceptance/shared/util" + + . "github.com/onsi/ginkgo/v2" +) + +var _ = Describe("Test:", func() { + + Context("Build Cluster and validate", func() { + + It("Starts up with no issues", func() { + testcase.TestBuildCluster(GinkgoT(), false) + }) + + It("Checks Node Status", func() { + testcase.TestNodeStatus( + assert.NodeAssertReadyStatus(), + nil, + ) + }) + + It("Checks Pod Status", func() { + testcase.TestPodStatus( + assert.PodAssertRestart(), + assert.PodAssertReady(), + assert.PodAssertStatus(), + ) + }) + }) + + It("Verifies ClusterIP Service Pre upgrade", func() { + testcase.TestServiceClusterIp(true) + }) + + It("Verifies NodePort Service Pre upgrade", func() { + testcase.TestServiceNodePort(true) + }) + + It("Verifies Ingress Pre upgrade", func() { + testcase.TestIngress(true) + }) + + It("Verifies Daemonset Pre upgrade", func() { + testcase.TestDaemonset(true) + }) + + It("Verifies DNS Access Pre upgrade", func() { + testcase.TestDnsAccess(true) + }) + + It("Upgrade manual", func() { + _ = testcase.TestUpgradeClusterManually(customflag.InstallType.String()) + }) + + It("Checks Node Status pos upgrade", func() { + testcase.TestNodeStatus( + assert.NodeAssertReadyStatus(), + assert.NodeAssertVersionTypeUpgraded(&customflag.InstallType), + ) + }) + + It("Checks Pod Status pos upgrade", func() { + testcase.TestPodStatus( + nil, + assert.PodAssertReady(), + assert.PodAssertStatus(), + ) + }) + + It("Verifies ClusterIP Service Post upgrade", func() { + testcase.TestServiceClusterIp(false) + defer util.ManageWorkload("delete", "clusterip.yaml") + }) + + It("Verifies NodePort Service Post upgrade", func() { + testcase.TestServiceNodePort(false) + defer 
util.ManageWorkload("delete", "nodeport.yaml") + }) + + It("Verifies Ingress Post upgrade", func() { + testcase.TestIngress(false) + defer util.ManageWorkload("delete", "ingress.yaml") + }) + + It("Verifies Daemonset Post upgrade", func() { + testcase.TestDaemonset(false) + defer util.ManageWorkload("delete", "daemonset.yaml") + }) + + It("Verifies DNS Access Post upgrade", func() { + testcase.TestDnsAccess(true) + defer util.ManageWorkload("delete", "dns.yaml") + }) +}) + +var _ = BeforeEach(func() { + if *util.Destroy { + Skip("Cluster is being Deleted") + } +}) + +var _ = AfterEach(func() { + if CurrentSpecReport().Failed() { + fmt.Printf("\nFAILED! %s\n", CurrentSpecReport().FullText()) + } else { + fmt.Printf("\nPASSED! %s\n", CurrentSpecReport().FullText()) + } +}) diff --git a/tests/acceptance/entrypoint/upgradecluster/upgradesuc_test.go b/tests/acceptance/entrypoint/upgradecluster/upgradesuc_test.go new file mode 100644 index 00000000000..9fae1513390 --- /dev/null +++ b/tests/acceptance/entrypoint/upgradecluster/upgradesuc_test.go @@ -0,0 +1,110 @@ +//go:build upgradesuc + +package upgradecluster + +import ( + "fmt" + + "github.com/rancher/rke2/tests/acceptance/core/service/assert" + "github.com/rancher/rke2/tests/acceptance/core/service/customflag" + "github.com/rancher/rke2/tests/acceptance/core/testcase" + "github.com/rancher/rke2/tests/acceptance/shared/util" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("SUC Upgrade Tests:", func() { + + It("Starts up with no issues", func() { + testcase.TestBuildCluster(GinkgoT(), false) + }) + + It("Checks Node Status", func() { + testcase.TestNodeStatus( + assert.NodeAssertReadyStatus(), + nil, + ) + }) + + It("Checks Pod Status", func() { + testcase.TestPodStatus( + assert.PodAssertRestart(), + assert.PodAssertReady(), + assert.PodAssertStatus(), + ) + }) + + It("Verifies ClusterIP Service pre upgrade", func() { + testcase.TestServiceClusterIp(true) + }) + + It("Verifies NodePort Service pre upgrade", func() { + testcase.TestServiceNodePort(true) + }) + + It("Verifies Ingress pre upgrade", func() { + testcase.TestIngress(true) + }) + + It("Verifies Daemonset pre upgrade", func() { + testcase.TestDaemonset(true) + }) + + It("Verifies DNS Access pre upgrade", func() { + testcase.TestDnsAccess(true) + }) + + It("\nUpgrade via SUC", func() { + err := testcase.TestUpgradeClusterSUC(customflag.UpgradeVersionSUC.String()) + Expect(err).NotTo(HaveOccurred()) + }) + + It("Checks Node Status pos upgrade suc", func() { + testcase.TestNodeStatus( + assert.NodeAssertReadyStatus(), + assert.NodeAssertVersionUpgraded(), + ) + }) + + It("Checks Pod Status pos upgrade suc", func() { + testcase.TestPodStatus( + nil, + assert.PodAssertReady(), + assert.PodAssertStatus(), + ) + }) + + It("Verifies ClusterIP Service pos upgrade", func() { + testcase.TestServiceClusterIp(false) + defer util.ManageWorkload("delete", "clusterip.yaml") + }) + + It("Verifies NodePort Service pos upgrade", func() { + testcase.TestServiceNodePort(false) + defer util.ManageWorkload("delete", "nodeport.yaml") + }) + + It("Verifies Ingress pos upgrade", func() { + testcase.TestIngress(false) + defer util.ManageWorkload("delete", "ingress.yaml") + }) + + It("Verifies Daemonset pos upgrade", func() { + testcase.TestDaemonset(false) + defer util.ManageWorkload("delete", "daemonset.yaml") + }) + + It("Verifies DNS Access pos 
upgrade", func() { + testcase.TestDnsAccess(true) + defer util.ManageWorkload("delete", "dns.yaml") + }) +}) + +var _ = AfterEach(func() { + if CurrentSpecReport().Failed() { + fmt.Printf("\nFAILED! %s\n", CurrentSpecReport().FullText()) + } else { + fmt.Printf("\nPASSED! %s\n", CurrentSpecReport().FullText()) + } +}) diff --git a/tests/acceptance/entrypoint/versionbump/version_suite_test.go b/tests/acceptance/entrypoint/versionbump/version_suite_test.go new file mode 100644 index 00000000000..6bf0ba8ee0a --- /dev/null +++ b/tests/acceptance/entrypoint/versionbump/version_suite_test.go @@ -0,0 +1,58 @@ +package versionbump + +import ( + "flag" + "fmt" + "os" + "testing" + + "github.com/rancher/rke2/tests/acceptance/core/service" + "github.com/rancher/rke2/tests/acceptance/core/service/customflag" + "github.com/rancher/rke2/tests/acceptance/core/service/factory" + "github.com/rancher/rke2/tests/acceptance/core/service/template" + "github.com/rancher/rke2/tests/acceptance/shared/util" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestMain(m *testing.M) { + flag.StringVar(&service.CmdHost, "cmdHost", "", "Comma separated list of commands to execute on host") + flag.StringVar(&service.ExpectedValueHost, "expectedValueHost", "", "Comma separated list of expected values for host commands") + flag.StringVar(&service.CmdNode, "cmdNode", "", "Comma separated list of commands to execute on node") + flag.StringVar(&service.ExpectedValueNode, "expectedValueNode", "", "Comma separated list of expected values for node commands") + flag.StringVar(&service.ExpectedValueUpgradedHost, "expectedValueUpgradedHost", "", "Expected value of the command ran on Host after upgrading") + flag.StringVar(&service.ExpectedValueUpgradedNode, "expectedValueUpgradedNode", "", "Expected value of the command ran on Node after upgrading") + flag.Var(&customflag.InstallUpgradeFlag, "installUpgradeFlag", "Install upgrade customflag") + flag.StringVar(&service.Description, "description", "", "Description of the test") + flag.Var(&customflag.TestCase, "testCase", "Test case to run") + flag.BoolVar(&customflag.TestCase.DeployWorkload, "deployWorkload", false, "Deploy workload customflag") + + flag.Parse() + + testFunc, err := template.GetTestCase(customflag.TestCase.TestFuncName) + if err != nil { + fmt.Printf("Error: %v\n", err) + return + } + + if testFunc != nil { + customflag.TestCase.TestFunc = customflag.TestCaseFlagType(testFunc) + } + + os.Exit(m.Run()) +} + +func TestVersionTestSuite(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Version Test Suite") +} + +var _ = AfterSuite(func() { + g := GinkgoT() + if *util.Destroy { + status, err := factory.BuildCluster(g, *util.Destroy) + Expect(err).NotTo(HaveOccurred()) + Expect(status).To(Equal("cluster destroyed")) + } +}) diff --git a/tests/acceptance/entrypoint/versionbump/versionbump_test.go b/tests/acceptance/entrypoint/versionbump/versionbump_test.go new file mode 100644 index 00000000000..c684d8c98a9 --- /dev/null +++ 
b/tests/acceptance/entrypoint/versionbump/versionbump_test.go @@ -0,0 +1,79 @@ +//go:build versionbump + +package versionbump + +import ( + "fmt" + + "github.com/rancher/rke2/tests/acceptance/core/service" + "github.com/rancher/rke2/tests/acceptance/core/service/assert" + "github.com/rancher/rke2/tests/acceptance/core/service/customflag" + "github.com/rancher/rke2/tests/acceptance/core/service/template" + "github.com/rancher/rke2/tests/acceptance/core/testcase" + "github.com/rancher/rke2/tests/acceptance/shared/util" + + . "github.com/onsi/ginkgo/v2" +) + +var _ = Describe("VersionTemplate Upgrade:", func() { + + It("Start Up with no issues", func() { + testcase.TestBuildCluster(GinkgoT(), false) + }) + + It("Check Node Status", func() { + testcase.TestNodeStatus( + assert.NodeAssertReadyStatus(), + nil, + ) + }) + + It("Checks Pod Status", func() { + testcase.TestPodStatus( + assert.PodAssertRestart(), + assert.PodAssertReady(), + assert.PodAssertStatus(), + ) + }) + + It("Test Bump version", func() { + template.VersionTemplate(template.VersionTestTemplate{ + Description: service.Description, + TestCombination: &template.RunCmd{ + RunOnHost: []template.TestMap{ + { + Cmd: service.CmdHost, + ExpectedValue: service.ExpectedValueHost, + ExpectedValueUpgrade: service.ExpectedValueUpgradedHost, + }, + }, + RunOnNode: []template.TestMap{ + { + Cmd: service.CmdNode, + ExpectedValue: service.ExpectedValueNode, + ExpectedValueUpgrade: service.ExpectedValueUpgradedNode, + }, + }, + }, + InstallUpgrade: customflag.InstallUpgradeFlag, + TestConfig: &template.TestConfig{ + TestFunc: template.TestCase(customflag.TestCase.TestFunc), + DeployWorkload: customflag.TestCase.DeployWorkload, + }, + }) + }) +}) + +var _ = BeforeEach(func() { + if *util.Destroy { + Skip("Cluster is being Deleted") + } +}) + +var _ = AfterEach(func() { + if CurrentSpecReport().Failed() { + fmt.Printf("\nFAILED! %s\n", CurrentSpecReport().FullText()) + } else { + fmt.Printf("\nPASSED! 
%s\n", CurrentSpecReport().FullText()) + } +}) diff --git a/tests/acceptance/entrypoint/versionbump/versioncoredns_test.go b/tests/acceptance/entrypoint/versionbump/versioncoredns_test.go new file mode 100644 index 00000000000..04f778469a8 --- /dev/null +++ b/tests/acceptance/entrypoint/versionbump/versioncoredns_test.go @@ -0,0 +1,72 @@ +//go:build coredns + +package versionbump + +import ( + "fmt" + + "github.com/rancher/rke2/tests/acceptance/core/service" + "github.com/rancher/rke2/tests/acceptance/core/service/assert" + "github.com/rancher/rke2/tests/acceptance/core/service/customflag" + "github.com/rancher/rke2/tests/acceptance/core/service/template" + "github.com/rancher/rke2/tests/acceptance/core/testcase" + "github.com/rancher/rke2/tests/acceptance/shared/util" + + . "github.com/onsi/ginkgo/v2" +) + +var _ = Describe("VersionTemplate Upgrade:", func() { + + It("Start Up with no issues", func() { + testcase.TestBuildCluster(GinkgoT(), false) + }) + + It("Check Node Status", func() { + testcase.TestNodeStatus( + assert.NodeAssertReadyStatus(), + nil, + ) + }) + + It("Checks Pod Status", func() { + testcase.TestPodStatus( + assert.PodAssertRestart(), + assert.PodAssertReady(), + assert.PodAssertStatus(), + ) + }) + + It("Test Bump version CoreDNS", func() { + template.VersionTemplate(template.VersionTestTemplate{ + Description: "Test CoreDNS bump", + TestCombination: &template.RunCmd{ + RunOnHost: []template.TestMap{ + { + Cmd: util.GetCoreDNSdeployImage, + ExpectedValue: service.ExpectedValueHost, + ExpectedValueUpgrade: service.ExpectedValueUpgradedHost, + }, + }, + }, + InstallUpgrade: customflag.InstallUpgradeFlag, + TestConfig: &template.TestConfig{ + TestFunc: testcase.TestCoredns, + DeployWorkload: true, + }, + }) + }) +}) + +var _ = BeforeEach(func() { + if *util.Destroy { + Skip("Cluster is being Deleted") + } +}) + +var _ = AfterEach(func() { + if CurrentSpecReport().Failed() { + fmt.Printf("\nFAILED! 
%s\n", CurrentSpecReport().FullText()) + } else { + fmt.Printf("\nPASSED! %s\n", CurrentSpecReport().FullText()) + } +}) diff --git a/tests/acceptance/entrypoint/versionbump/versionrunc_test.go b/tests/acceptance/entrypoint/versionbump/versionrunc_test.go new file mode 100644 index 00000000000..978e25df450 --- /dev/null +++ b/tests/acceptance/entrypoint/versionbump/versionrunc_test.go @@ -0,0 +1,63 @@ +//go:build runc + +package versionbump + +import ( + "fmt" + + "github.com/rancher/rke2/tests/acceptance/core/service" + "github.com/rancher/rke2/tests/acceptance/core/service/assert" + "github.com/rancher/rke2/tests/acceptance/core/service/customflag" + "github.com/rancher/rke2/tests/acceptance/core/service/template" + "github.com/rancher/rke2/tests/acceptance/core/testcase" + "github.com/rancher/rke2/tests/acceptance/shared/util" + + . "github.com/onsi/ginkgo/v2" +) + +var _ = Describe("VersionTemplate Upgrade:", func() { + + It("Start Up with no issues", func() { + testcase.TestBuildCluster(GinkgoT(), false) + }) + + It("Check Node Status", func() { + testcase.TestNodeStatus( + assert.NodeAssertReadyStatus(), + nil, + ) + }) + + It("Check Pod Status", func() { + testcase.TestPodStatus( + assert.PodAssertRestart(), + assert.PodAssertReady(), + assert.PodAssertStatus(), + ) + }) + + It("Verifies Runc bump", func() { + template.VersionTemplate(template.VersionTestTemplate{ + Description: "test runc bump", + TestCombination: &template.RunCmd{ + RunOnNode: []template.TestMap{ + { + Cmd: util.GetRuncVersion, + ExpectedValue: service.ExpectedValueNode, + ExpectedValueUpgrade: service.ExpectedValueUpgradedNode, + }, + }, + }, + InstallUpgrade: customflag.InstallUpgradeFlag, + TestConfig: nil, + }) + }) +}) + +var _ = AfterEach(func() { + if CurrentSpecReport().Failed() { + fmt.Printf("\nFAILED! %s\n", CurrentSpecReport().FullText()) + } else { + fmt.Printf("\nPASSED! 
%s\n", CurrentSpecReport().FullText()) + } +}) diff --git a/tests/terraform/resource_files/clusterip.yaml b/tests/acceptance/fixtures/workloads/clusterip.yaml similarity index 100% rename from tests/terraform/resource_files/clusterip.yaml rename to tests/acceptance/fixtures/workloads/clusterip.yaml diff --git a/tests/terraform/resource_files/daemonset.yaml b/tests/acceptance/fixtures/workloads/daemonset.yaml similarity index 100% rename from tests/terraform/resource_files/daemonset.yaml rename to tests/acceptance/fixtures/workloads/daemonset.yaml diff --git a/tests/terraform/resource_files/dnsutils.yaml b/tests/acceptance/fixtures/workloads/dnsutils.yaml similarity index 100% rename from tests/terraform/resource_files/dnsutils.yaml rename to tests/acceptance/fixtures/workloads/dnsutils.yaml diff --git a/tests/terraform/resource_files/ingress.yaml b/tests/acceptance/fixtures/workloads/ingress.yaml similarity index 100% rename from tests/terraform/resource_files/ingress.yaml rename to tests/acceptance/fixtures/workloads/ingress.yaml diff --git a/tests/terraform/resource_files/nodeport.yaml b/tests/acceptance/fixtures/workloads/nodeport.yaml similarity index 100% rename from tests/terraform/resource_files/nodeport.yaml rename to tests/acceptance/fixtures/workloads/nodeport.yaml diff --git a/tests/terraform/resource_files/suc.yaml b/tests/acceptance/fixtures/workloads/suc.yaml similarity index 100% rename from tests/terraform/resource_files/suc.yaml rename to tests/acceptance/fixtures/workloads/suc.yaml diff --git a/tests/acceptance/fixtures/workloads/traefiklogs.yaml b/tests/acceptance/fixtures/workloads/traefiklogs.yaml new file mode 100644 index 00000000000..bdefe87d126 --- /dev/null +++ b/tests/acceptance/fixtures/workloads/traefiklogs.yaml @@ -0,0 +1,13 @@ +apiVersion: helm.cattle.io/v1 +kind: HelmChartConfig +metadata: + name: traefik + namespace: kube-system +spec: + valuesContent: |- + service: + spec: + externalTrafficPolicy: Local + logs: + access: + enabled: 
true \ No newline at end of file diff --git a/tests/terraform/resource_files/upgrade-plan.yaml b/tests/acceptance/fixtures/workloads/upgrade-plan.yaml similarity index 83% rename from tests/terraform/resource_files/upgrade-plan.yaml rename to tests/acceptance/fixtures/workloads/upgrade-plan.yaml index 9a574e6f7c1..5c576f10e9c 100644 --- a/tests/terraform/resource_files/upgrade-plan.yaml +++ b/tests/acceptance/fixtures/workloads/upgrade-plan.yaml @@ -30,8 +30,14 @@ spec: version: $UPGRADEVERSION nodeSelector: matchExpressions: - - {key: node-role.kubernetes.io/etcd, operator: In, values: ["true"]} - - {key: node-role.kubernetes.io/control-plane, operator: NotIn, values: ["true"]} + - key: "rke.cattle.io/etcd-role" + operator: Exists + - key: node-role.kubernetes.io/etcd + operator: In + values: [ "true" ] + - key: node-role.kubernetes.io/control-plane + operator: NotIn + values: [ "true" ] tolerations: - operator: Exists serviceAccountName: system-upgrade @@ -39,6 +45,8 @@ spec: image: rancher/rke2-upgrade args: ["prepare", "rke2-server-cp"] cordon: true + drain: + force: true upgrade: image: rancher/rke2-upgrade --- diff --git a/tests/terraform/go.mod b/tests/acceptance/go.mod similarity index 93% rename from tests/terraform/go.mod rename to tests/acceptance/go.mod index 6859cdc3b5f..874cd38b594 100644 --- a/tests/terraform/go.mod +++ b/tests/acceptance/go.mod @@ -1,4 +1,4 @@ -module github.com/rancher/rke2/tests/terraform +module github.com/rancher/rke2/tests/acceptance go 1.19 @@ -50,11 +50,11 @@ require ( github.com/ulikunitz/xz v0.5.10 // indirect github.com/zclconf/go-cty v1.9.1 // indirect go.opencensus.io v0.24.0 // indirect - golang.org/x/net v0.8.0 // indirect + golang.org/x/net v0.9.0 // indirect golang.org/x/oauth2 v0.1.0 // indirect - golang.org/x/sys v0.6.0 // indirect - golang.org/x/text v0.8.0 // indirect - golang.org/x/tools v0.7.0 // indirect + golang.org/x/sys v0.7.0 // indirect + golang.org/x/text v0.9.0 // indirect + golang.org/x/tools v0.8.0 // 
indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/api v0.103.0 // indirect google.golang.org/appengine v1.6.7 // indirect diff --git a/tests/terraform/go.sum b/tests/acceptance/go.sum similarity index 99% rename from tests/terraform/go.sum rename to tests/acceptance/go.sum index 20a94b6d336..969fad1fad4 100644 --- a/tests/terraform/go.sum +++ b/tests/acceptance/go.sum @@ -546,8 +546,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -652,12 +652,12 @@ golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= +golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -668,8 +668,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -725,8 +725,8 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools 
v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= -golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= +golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/tests/terraform/modules/define_node_role.sh b/tests/acceptance/modules/install/define_node_role.sh similarity index 100% rename from tests/terraform/modules/define_node_role.sh rename to tests/acceptance/modules/install/define_node_role.sh diff --git a/tests/terraform/modules/install_rke2_master.sh b/tests/acceptance/modules/install/install_rke2_master.sh old mode 100755 new mode 100644 similarity index 100% rename from tests/terraform/modules/install_rke2_master.sh rename to tests/acceptance/modules/install/install_rke2_master.sh diff --git a/tests/terraform/modules/join_rke2_agent.sh b/tests/acceptance/modules/install/join_rke2_agent.sh old mode 100755 new mode 100644 similarity index 98% rename from tests/terraform/modules/join_rke2_agent.sh rename to tests/acceptance/modules/install/join_rke2_agent.sh index 4e3ee7092a3..98f08dcf8db --- a/tests/terraform/modules/join_rke2_agent.sh +++ b/tests/acceptance/modules/install/join_rke2_agent.sh @@ -79,4 +79,4 @@ then systemctl restart systemd-sysctl fi sudo systemctl enable rke2-agent -sudo systemctl start rke2-agent +sudo systemctl start rke2-agent \ 
No newline at end of file diff --git a/tests/terraform/modules/join_rke2_master.sh b/tests/acceptance/modules/install/join_rke2_master.sh old mode 100755 new mode 100644 similarity index 98% rename from tests/terraform/modules/join_rke2_master.sh rename to tests/acceptance/modules/install/join_rke2_master.sh index 1c1af0ec4cc..67a68dea283 --- a/tests/terraform/modules/join_rke2_master.sh +++ b/tests/acceptance/modules/install/join_rke2_master.sh @@ -85,4 +85,4 @@ then fi sudo systemctl enable rke2-server -sudo systemctl start --no-block rke2-server +sudo systemctl start --no-block rke2-server \ No newline at end of file diff --git a/tests/terraform/modules/optional_write_files.sh b/tests/acceptance/modules/install/optional_write_files.sh similarity index 99% rename from tests/terraform/modules/optional_write_files.sh rename to tests/acceptance/modules/install/optional_write_files.sh index 809b4542d28..0a3bcd7d6ea 100644 --- a/tests/terraform/modules/optional_write_files.sh +++ b/tests/acceptance/modules/install/optional_write_files.sh @@ -16,4 +16,4 @@ then curl -s "$raw_data" -o "$file_location" done -fi +fi \ No newline at end of file diff --git a/tests/terraform/modules/main.tf b/tests/acceptance/modules/main.tf similarity index 99% rename from tests/terraform/modules/main.tf rename to tests/acceptance/modules/main.tf index d4defe860c9..a700bbca079 100644 --- a/tests/terraform/modules/main.tf +++ b/tests/acceptance/modules/main.tf @@ -74,4 +74,4 @@ module "worker" { install_method = var.install_method rke2_channel = var.rke2_channel worker_flags = var.worker_flags -} +} \ No newline at end of file diff --git a/tests/terraform/modules/master/instances_server.tf b/tests/acceptance/modules/master/instances_server.tf similarity index 97% rename from tests/terraform/modules/master/instances_server.tf rename to tests/acceptance/modules/master/instances_server.tf index 2ba6d7f4e8f..05d0369a4f0 100644 --- a/tests/terraform/modules/master/instances_server.tf +++ 
b/tests/acceptance/modules/master/instances_server.tf @@ -21,7 +21,7 @@ resource "aws_instance" "master" { "kubernetes.io/cluster/clusterid" = "owned" } provisioner "file" { - source = "optional_write_files.sh" + source = "install/optional_write_files.sh" destination = "/tmp/optional_write_files.sh" } provisioner "remote-exec" { @@ -31,7 +31,7 @@ resource "aws_instance" "master" { ] } provisioner "file" { - source = "define_node_role.sh" + source = "install/define_node_role.sh" destination = "/tmp/define_node_role.sh" } provisioner "remote-exec" { @@ -41,7 +41,7 @@ resource "aws_instance" "master" { ] } provisioner "file" { - source = "install_rke2_master.sh" + source = "install/install_rke2_master.sh" destination = "/tmp/install_rke2_master.sh" } provisioner "remote-exec" { @@ -89,7 +89,7 @@ resource "aws_instance" "master2" { } depends_on = [aws_instance.master] provisioner "file" { - source = "optional_write_files.sh" + source = "install/optional_write_files.sh" destination = "/tmp/optional_write_files.sh" } provisioner "remote-exec" { @@ -99,7 +99,7 @@ resource "aws_instance" "master2" { ] } provisioner "file" { - source = "define_node_role.sh" + source = "install/define_node_role.sh" destination = "/tmp/define_node_role.sh" } provisioner "remote-exec" { @@ -109,7 +109,7 @@ resource "aws_instance" "master2" { ] } provisioner "file" { - source = "join_rke2_master.sh" + source = "install/join_rke2_master.sh" destination = "/tmp/join_rke2_master.sh" } provisioner "remote-exec" { diff --git a/tests/terraform/modules/master/outputs.tf b/tests/acceptance/modules/master/outputs.tf similarity index 100% rename from tests/terraform/modules/master/outputs.tf rename to tests/acceptance/modules/master/outputs.tf diff --git a/tests/terraform/modules/master/variables.tf b/tests/acceptance/modules/master/variables.tf similarity index 97% rename from tests/terraform/modules/master/variables.tf rename to tests/acceptance/modules/master/variables.tf index 
9405da17654..d46deecaaa7 100644 --- a/tests/terraform/modules/master/variables.tf +++ b/tests/acceptance/modules/master/variables.tf @@ -43,4 +43,4 @@ variable "etcd_cp_nodes" {} variable "etcd_worker_nodes" {} variable "cp_only_nodes" {} variable "cp_worker_nodes" {} -variable "optional_files" {} +variable "optional_files" {} \ No newline at end of file diff --git a/tests/terraform/modules/outputs.tf b/tests/acceptance/modules/outputs.tf similarity index 99% rename from tests/terraform/modules/outputs.tf rename to tests/acceptance/modules/outputs.tf index 6c5d8c9792a..9b42ae9edf7 100644 --- a/tests/terraform/modules/outputs.tf +++ b/tests/acceptance/modules/outputs.tf @@ -11,4 +11,4 @@ output "worker_ips" { output "kubeconfig" { value = module.master.kubeconfig description = "kubeconfig of the cluster created" -} +} \ No newline at end of file diff --git a/tests/acceptance/modules/providers.tf b/tests/acceptance/modules/providers.tf new file mode 100644 index 00000000000..d9c349e24c9 --- /dev/null +++ b/tests/acceptance/modules/providers.tf @@ -0,0 +1,3 @@ +provider "aws" { + region = "${var.region}" +} \ No newline at end of file diff --git a/tests/terraform/modules/variables.tf b/tests/acceptance/modules/variables.tf similarity index 99% rename from tests/terraform/modules/variables.tf rename to tests/acceptance/modules/variables.tf index 8aecf32adf1..e7232ede809 100644 --- a/tests/terraform/modules/variables.tf +++ b/tests/acceptance/modules/variables.tf @@ -70,4 +70,4 @@ variable "cp_worker_nodes" { } variable "optional_files" { description = "File location and raw data url separate by commas, with a space for other pairs. E.g. 
file1,url1 file2,url2" -} +} \ No newline at end of file diff --git a/tests/terraform/modules/worker/instances_worker.tf b/tests/acceptance/modules/worker/instances_worker.tf similarity index 97% rename from tests/terraform/modules/worker/instances_worker.tf rename to tests/acceptance/modules/worker/instances_worker.tf index 545396f49cc..6e6b03a268a 100644 --- a/tests/terraform/modules/worker/instances_worker.tf +++ b/tests/acceptance/modules/worker/instances_worker.tf @@ -27,7 +27,7 @@ resource "aws_instance" "worker" { "kubernetes.io/cluster/clusterid" = "owned" } provisioner "file" { - source = "join_rke2_agent.sh" + source = "install/join_rke2_agent.sh" destination = "/tmp/join_rke2_agent.sh" } provisioner "remote-exec" { diff --git a/tests/terraform/modules/worker/outputs.tf b/tests/acceptance/modules/worker/outputs.tf similarity index 97% rename from tests/terraform/modules/worker/outputs.tf rename to tests/acceptance/modules/worker/outputs.tf index 87238eef9ae..473c4c8375e 100644 --- a/tests/terraform/modules/worker/outputs.tf +++ b/tests/acceptance/modules/worker/outputs.tf @@ -1,3 +1,3 @@ output "worker_ips" { value = join(",", aws_instance.worker.*.public_ip) -} +} \ No newline at end of file diff --git a/tests/terraform/modules/worker/variables.tf b/tests/acceptance/modules/worker/variables.tf similarity index 95% rename from tests/terraform/modules/worker/variables.tf rename to tests/acceptance/modules/worker/variables.tf index 4d509beb59d..6fca0ca6f6a 100644 --- a/tests/terraform/modules/worker/variables.tf +++ b/tests/acceptance/modules/worker/variables.tf @@ -27,4 +27,4 @@ variable "username" { default = "username" } variable "vpc_id" {} -variable "worker_flags" {} +variable "worker_flags" {} \ No newline at end of file diff --git a/tests/terraform/scripts/Dockerfile.build b/tests/acceptance/shared/scripts/Dockerfile.build similarity index 100% rename from tests/terraform/scripts/Dockerfile.build rename to 
tests/acceptance/shared/scripts/Dockerfile.build diff --git a/tests/terraform/scripts/Jenkinsfile b/tests/acceptance/shared/scripts/Jenkinsfile similarity index 85% rename from tests/terraform/scripts/Jenkinsfile rename to tests/acceptance/shared/scripts/Jenkinsfile index de955cf4dcf..f8d05e324b8 100644 --- a/tests/terraform/scripts/Jenkinsfile +++ b/tests/acceptance/shared/scripts/Jenkinsfile @@ -50,32 +50,32 @@ node { try { stage('Configure and Build') { if (env.AWS_SSH_PEM_KEY && env.AWS_SSH_KEY_NAME) { - dir("./tests/terraform/modules/config/.ssh") { + dir("./tests/acceptance/modules/config/.ssh") { def decoded = new String(AWS_SSH_PEM_KEY.decodeBase64()) writeFile file: "aws_key.pem", text: decoded } } - dir("./tests/terraform/modules/config") { + dir("./tests/acceptance/modules/config") { def filename = "local.tfvars" def configContents = env.TFVARS writeFile file: filename, text: configContents + "\npassword = \"" + RKE2_RHEL_PASSWORD + "\"" + "\nssh_key = \"" + AWS_SSH_KEY_NAME + "\"" + - "\naccess_key = \"/go/src/github.com/rancher/rke2/tests/terraform/modules/config/.ssh/aws_key.pem\"" + + "\naccess_key = \"/go/src/github.com/rancher/rke2/tests/acceptance/modules/config/.ssh/aws_key.pem\"" + "\nresource_name = \"" + RKE2_HOSTNAME_PREFIX + "\"" + "\nrke2_version = \"" + RKE2_VERSION + "\"" + "\nrke2_channel = \"" + RKE2_CHANNEL + "\"" } - sh "./tests/terraform/scripts/configure.sh" - sh "./tests/terraform/scripts/build.sh" + sh "./tests/acceptance/shared/scripts/configure.sh" + sh "./tests/acceptance/shared/scripts/build.sh" } - stage('Run Tests') { + stage('Run TestCombination') { sh "docker run --name ${testContainer} -t --env-file ${envFile} " + - "${imageName} sh -c \"chmod 400 /go/src/github.com/rancher/rke2/tests/terraform/modules/config/.ssh/aws_key.pem && " + - "cd ./tests/terraform && go test -timeout=${timeout} -v ./${RKE2_TESTCASE}/... 
${RKE2_TEST_ARGS}\"" + "${imageName} sh -c \"chmod 400 /go/src/github.com/rancher/rke2/tests/acceptance/modules/config/.ssh/aws_key.pem && " + + "cd ./tests/acceptance/entrypoint && go test -timeout=${timeout} -v ./${RKE2_TESTCASE}/... ${RKE2_TEST_ARGS}\"" } } finally { stage('Cleanup') { diff --git a/tests/terraform/scripts/build.sh b/tests/acceptance/shared/scripts/build.sh similarity index 74% rename from tests/terraform/scripts/build.sh rename to tests/acceptance/shared/scripts/build.sh index b4b4b258db6..85759015703 100755 --- a/tests/terraform/scripts/build.sh +++ b/tests/acceptance/shared/scripts/build.sh @@ -14,7 +14,7 @@ fi count=0 while [[ 3 -gt $count ]]; do - docker build . -f tests/terraform/scripts/Dockerfile.build -t rke2-tf-"${TRIM_JOB_NAME}""${BUILD_NUMBER}" + docker build . -f tests/rke2tf/shared/scripts/Dockerfile.build -t rke2-tf-"${TRIM_JOB_NAME}""${BUILD_NUMBER}" if [[ $? -eq 0 ]]; then break; fi count=$(($count + 1)) diff --git a/tests/terraform/scripts/configure.sh b/tests/acceptance/shared/scripts/configure.sh similarity index 100% rename from tests/terraform/scripts/configure.sh rename to tests/acceptance/shared/scripts/configure.sh diff --git a/tests/acceptance/shared/scripts/delete_resources.sh b/tests/acceptance/shared/scripts/delete_resources.sh new file mode 100755 index 00000000000..020e51647de --- /dev/null +++ b/tests/acceptance/shared/scripts/delete_resources.sh @@ -0,0 +1,86 @@ +#!/bin/bash + +#Get resource name from tfvarslocal && change name to make more sense in this context +RESOURCE_NAME=$(grep resource_name /dev/null 2>&1 + + +#Get the list of load balancer ARNs +lb_arn_list=$(aws elbv2 describe-load-balancers \ + --query "LoadBalancers[?starts_with(LoadBalancerName, '${name_prefix}') && Type=='network'].LoadBalancerArn" \ + --output text) + + +#Loop through the load balancer ARNs and delete the load balancers +for lb_arn in $lb_arn_list; do + echo "Deleting load balancer $lb_arn" + aws elbv2 delete-load-balancer 
--load-balancer-arn "$lb_arn" +done + + +#Get the list of target group ARNs +tg_arn_list=$(aws elbv2 describe-target-groups \ + --query "TargetGroups[?starts_with(TargetGroupName, '${name_prefix}') && Protocol=='TCP'].TargetGroupArn" \ + --output text) + + +#Loop through the target group ARNs and delete the target groups +for tg_arn in $tg_arn_list; do + echo "Deleting target group $tg_arn" + aws elbv2 delete-target-group --target-group-arn "$tg_arn" +done + + +#Get the ID and recordName with lower case of the hosted zone that contains the Route 53 record sets +name_prefix_lower=$(echo "$name_prefix" | tr '[:upper:]' '[:lower:]') +r53_zone_id=$(aws route53 list-hosted-zones-by-name --dns-name "${name_prefix}." \ + --query "HostedZones[0].Id" --output text) +r53_record=$(aws route53 list-resource-record-sets \ + --hosted-zone-id "${r53_zone_id}" \ + --query "ResourceRecordSets[?starts_with(Name, '${name_prefix_lower}.') && Type == 'CNAME'].Name" \ + --output text) + + +#Get ResourceRecord Value +record_value=$(aws route53 list-resource-record-sets \ + --hosted-zone-id "${r53_zone_id}" \ + --query "ResourceRecordSets[?starts_with(Name, '${name_prefix_lower}.') \ + && Type == 'CNAME'].ResourceRecords[0].Value" --output text) + + +#Delete Route53 record +if [[ "$r53_record" == "${name_prefix_lower}."* ]]; then + echo "Deleting Route53 record ${r53_record}" + change_status=$(aws route53 change-resource-record-sets --hosted-zone-id "${r53_zone_id}" \ + --change-batch '{"Changes": [ + { + "Action": "DELETE", + "ResourceRecordSet": { + "Name": "'"${r53_record}"'", + "Type": "CNAME", + "TTL": 300, + "ResourceRecords": [ + { + "Value": "'"${record_value}"'" + } + ] + } + } + ] + }') + status_id=$(echo "$change_status" | jq -r '.ChangeInfo.Id') + #Get status from the change + aws route53 wait resource-record-sets-changed --id "$status_id" + echo "Successfully deleted Route53 record ${r53_record}: status: ${status_id}" +else + echo "No Route53 record found" +fi \ No newline at 
end of file diff --git a/tests/acceptance/shared/util/aux.go b/tests/acceptance/shared/util/aux.go new file mode 100644 index 00000000000..6f38d06509c --- /dev/null +++ b/tests/acceptance/shared/util/aux.go @@ -0,0 +1,204 @@ +package util + +import ( + "bytes" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + + "golang.org/x/crypto/ssh" +) + +// RunCommandHost executes a command on the host +func RunCommandHost(cmds ...string) (string, error) { + if cmds == nil { + return "", fmt.Errorf("cmd should not be empty") + } + + var output bytes.Buffer + for _, cmd := range cmds { + c := exec.Command("bash", "-c", cmd) + + stdoutPipe, err := c.StdoutPipe() + if err != nil { + return "", err + } + stderrPipe, err := c.StderrPipe() + if err != nil { + return "", err + } + + err = c.Start() + if err != nil { + return "", err + } + + _, err = io.Copy(&output, stdoutPipe) + if err != nil { + return "", err + } + + _, err = io.Copy(&output, stderrPipe) + if err != nil { + return "", err + } + + err = c.Wait() + if err != nil { + return output.String(), fmt.Errorf("executing command: %s: %w", cmd, err) + } + } + + return output.String(), nil +} + +func RunCommandOnNode(cmd string, ServerIP string) (string, error) { + if cmd == "" { + return "", fmt.Errorf("cmd should not be empty") + } + + host := ServerIP + ":22" + conn, err := configureSSH(host) + if err != nil { + return fmt.Errorf("failed to configure SSH: %v", err).Error(), err + } + + stdout, stderr, err := runsshCommand(cmd, conn) + if err != nil { + return fmt.Errorf("\ncommand: %s \n failed with error: %v", cmd, err).Error(), err + } + + stdout = strings.TrimSpace(stdout) + stderr = strings.TrimSpace(stderr) + + if stderr != "" && (!strings.Contains(stderr, "error") || + !strings.Contains(stderr, "1") || + !strings.Contains(stderr, "2")) { + return stderr, nil + } else if stderr != "" { + return fmt.Errorf("\ncommand: %s \n failed with error: %v", cmd, stderr).Error(), err + } + + return 
stdout, err +} + +// BasePath returns the base path of the project. +func BasePath() string { + _, b, _, _ := runtime.Caller(0) + return filepath.Join(filepath.Dir(b), "../..") +} + +// PrintFileContents prints the contents of the file as [] string. +func PrintFileContents(f ...string) error { + for _, file := range f { + content, err := os.ReadFile(file) + if err != nil { + return err + } + fmt.Println(string(content) + "\n") + } + + return nil +} + +// CountOfStringInSlice Used to count the pods using prefix passed in the list of pods. +func CountOfStringInSlice(str string, pods []Pod) int { + var count int + for _, p := range pods { + if strings.Contains(p.Name, str) { + count++ + } + } + return count +} + +// GetRke2Version returns the rke2 version with commit hash +func GetRke2Version() string { + ips := FetchNodeExternalIP() + for _, ip := range ips { + res, err := RunCommandOnNode(GetVersion, ip) + if err != nil { + return err.Error() + } + return res + } + + return "" +} + +// AddHelmRepo adds a helm repo to the cluster. 
+func AddHelmRepo(name, url string) (string, error) { + addRepo := fmt.Sprintf("helm repo add %s %s", name, url) + installRepo := fmt.Sprintf("helm install %s %s/%s -n kube-system", name, name, name) + + nodeExternalIP := FetchNodeExternalIP() + for _, ip := range nodeExternalIP { + _, err := RunCommandOnNode(InstallHelm, ip) + if err != nil { + return "", err + } + } + return RunCommandHost(addRepo, installRepo) +} + +func publicKey(path string) (ssh.AuthMethod, error) { + key, err := os.ReadFile(path) + if err != nil { + return nil, err + } + signer, err := ssh.ParsePrivateKey(key) + if err != nil { + return nil, err + } + + return ssh.PublicKeys(signer), nil +} + +func configureSSH(host string) (*ssh.Client, error) { + var config *ssh.ClientConfig + + authMethod, err := publicKey(AccessKey) + if err != nil { + return nil, err + } + config = &ssh.ClientConfig{ + User: AwsUser, + Auth: []ssh.AuthMethod{ + authMethod, + }, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + } + conn, err := ssh.Dial("tcp", host, config) + if err != nil { + return nil, err + } + + return conn, nil +} + +func runsshCommand(cmd string, conn *ssh.Client) (string, string, error) { + session, err := conn.NewSession() + if err != nil { + return "", "", err + } + defer session.Close() + + var stdoutBuf bytes.Buffer + var stderrBuf bytes.Buffer + session.Stdout = &stdoutBuf + session.Stderr = &stderrBuf + + errssh := session.Run(cmd) + stdoutStr := stdoutBuf.String() + stderrStr := stderrBuf.String() + + if errssh != nil { + return stdoutStr, stderrStr, fmt.Errorf("error on command execution: %v", errssh) + } + + return stdoutStr, stderrStr, nil +} diff --git a/tests/acceptance/shared/util/cluster.go b/tests/acceptance/shared/util/cluster.go new file mode 100644 index 00000000000..2742ac94a9e --- /dev/null +++ b/tests/acceptance/shared/util/cluster.go @@ -0,0 +1,286 @@ +package util + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/onsi/gomega" +) + +type Node struct { 
+ Name string + Status string + Roles string + Version string + InternalIP string + ExternalIP string +} + +type Pod struct { + NameSpace string + Name string + Ready string + Status string + Restarts string + NodeIP string + Node string +} + +// ManageWorkload creates or deletes a workload based on the action: create or delete. +func ManageWorkload(action, workload string) (string, error) { + if action != "create" && action != "delete" { + return "", fmt.Errorf("invalid action: %s. Must be 'create' or 'delete'", action) + } + var res string + var err error + + resourceDir := BasePath() + "/fixtures/workloads/" + + files, err := os.ReadDir(resourceDir) + if err != nil { + err = fmt.Errorf("%s : Unable to read resource manifest file for %s", err, workload) + return "", err + } + + for _, f := range files { + filename := filepath.Join(resourceDir, f.Name()) + if strings.TrimSpace(f.Name()) == workload { + if action == "create" { + res, err = createWorkload(workload, filename) + if err != nil { + return "", fmt.Errorf("failed to create workload %s: %s", workload, err) + } + } else { + res, err = deleteWorkload(workload, filename) + if err != nil { + return "", fmt.Errorf("failed to delete workload %s: %s", workload, err) + } + } + return res, err + } + } + + return "", fmt.Errorf("workload %s not found", workload) +} + +// createWorkload creates a workload. +func createWorkload(workload, filename string) (string, error) { + fmt.Println("\nDeploying", workload) + return RunCommandHost("kubectl apply -f " + filename + " --kubeconfig=" + KubeConfigFile) + +} + +// deleteWorkload deletes a workload and asserts that the workload is deleted. 
+func deleteWorkload(workload, filename string) (string, error) { + fmt.Println("\nRemoving", workload) + cmd := "kubectl delete -f " + filename + " --kubeconfig=" + KubeConfigFile + + gomega.Eventually(func(g gomega.Gomega) { + isDeleted, err := IsWorkloadDeleted(workload) + g.Expect(err).To(gomega.BeNil()) + g.Expect(isDeleted).To(gomega.BeTrue(), + "Workload should be deleted") + }, "60s", "5s").Should(gomega.Succeed()) + + return RunCommandHost(cmd) +} + +// IsWorkloadDeleted returns true if the workload is deleted. +func IsWorkloadDeleted(workload string) (bool, error) { + res, err := RunCommandHost(GetAll + KubeConfigFile) + if err != nil { + return false, err + } + + return !strings.Contains(res, workload), nil +} + +// KubectlCommand return results from various commands, it receives an "action" , source and args. +// it already has KubeConfigFile +// +// destination = host or node +// +// action = get,describe... +// +// source = pods, node , exec, service ... +// +// args = the rest of your command arguments. +func KubectlCommand(destination, action, source string, args ...string) (string, error) { + var cmd string + var res string + var err error + kubeconfigFlag := " --kubeconfig=" + KubeConfigFile + + if destination == "host" { + cmd = addKubectlCommand(action, source, args) + kubeconfigFlag + res, err = RunCommandHost(cmd) + if err != nil { + return "", err + } + } else if destination == "node" { + cmd = addKubectlCommand(action, source, args) + kubeconfigFlag + ips := FetchNodeExternalIP() + for _, ip := range ips { + res, err = RunCommandOnNode(cmd, ip) + if err != nil { + return "", err + } + } + } else { + return "", fmt.Errorf("invalid destination: %s", destination) + } + + return res, nil +} + +// addKubectlCommand using a specific action + source maps the args received to create a kubectl command. 
+func addKubectlCommand(action, source string, args []string) string { + shortCmd := map[string]string{ + "get": "kubectl get", + "describe": "kubectl describe", + "exec": "kubectl exec", + "delete": "kubectl delete", + "apply": "kubectl apply", + } + + cmdPrefix, ok := shortCmd[action] + if !ok { + cmdPrefix = action + } + + return cmdPrefix + " " + source + " " + strings.Join(args, " ") +} + +// Nodes returns the list of nodes in the cluster and parses the output with parseNodes. +func Nodes(print bool) ([]Node, error) { + return parseNodes(GetNodesWide+KubeConfigFile, print) +} + +// WorkerNodes returns the list of worker nodes in the cluster. +func WorkerNodes(print bool) ([]Node, error) { + return parseNodes(GetWorkerNodes+KubeConfigFile+GrepNoExec, print) +} + +// Pods returns the list of pods in the cluster and parses the output with parsePods. +func Pods(print bool) ([]Pod, error) { + return parsePods(GetPodsWide+KubeConfigFile, print) +} + +// FetchClusterIP returns the cluster IP and port of the service. +func FetchClusterIP( + namespace string, + serviceName string, +) (string, string, error) { + ip, err := RunCommandHost("kubectl get svc " + serviceName + " -n " + namespace + + " -o jsonpath='{.spec.clusterIP}' --kubeconfig=" + KubeConfigFile) + if err != nil { + return "", "", err + } + + port, err := RunCommandHost("kubectl get svc " + serviceName + " -n " + namespace + + " -o jsonpath='{.spec.ports[0].port}' --kubeconfig=" + KubeConfigFile) + if err != nil { + return "", "", err + } + + return ip, port, err +} + +// FetchNodeExternalIP returns the external IP of the nodes. +func FetchNodeExternalIP() []string { + res, _ := RunCommandHost(GetNodesExternalIp + KubeConfigFile) + nodeExternalIP := strings.Trim(res, " ") + nodeExternalIPs := strings.Split(nodeExternalIP, " ") + + return nodeExternalIPs +} + +// RestartCluster restarts the rke2 service on each node given by external IP. 
+func RestartCluster(ip string) (string, error) { + return RunCommandOnNode(RestartRKE2, ip) +} + +// FetchIngressIP returns the ingress IP of the given namespace +func FetchIngressIP(namespace string) ([]string, error) { + res, err := RunCommandHost( + "kubectl get ingress -n " + + namespace + + " -o jsonpath='{.items[0].status.loadBalancer.ingress[*].ip}' --kubeconfig=" + + KubeConfigFile, + ) + if err != nil { + return nil, err + } + + ingressIP := strings.Trim(res, " ") + if ingressIP == "" { + return nil, nil + } + ingressIPs := strings.Split(ingressIP, " ") + + return ingressIPs, nil +} + +func parseNodes(cmd string, print bool) ([]Node, error) { + nodes := make([]Node, 0, 10) + + res, err := RunCommandHost(cmd) + if err != nil { + return nil, err + } + + nodelist := strings.TrimSpace(res) + split := strings.Split(nodelist, "\n") + for _, rec := range split { + if strings.TrimSpace(rec) != "" { + fields := strings.Fields(rec) + n := Node{ + Name: fields[0], + Status: fields[1], + Roles: fields[2], + Version: fields[4], + InternalIP: fields[5], + ExternalIP: fields[6], + } + nodes = append(nodes, n) + } + } + if print { + fmt.Println(nodelist) + } + + return nodes, nil +} + +func parsePods(cmd string, print bool) ([]Pod, error) { + pods := make([]Pod, 0, 10) + + res, err := RunCommandHost(cmd) + if err != nil { + return nil, err + } + + podList := strings.TrimSpace(res) + + split := strings.Split(podList, "\n") + for _, rec := range split { + fields := strings.Fields(rec) + p := Pod{ + NameSpace: fields[0], + Name: fields[1], + Ready: fields[2], + Status: fields[3], + Restarts: fields[4], + NodeIP: fields[6], + Node: fields[7], + } + pods = append(pods, p) + } + if print { + fmt.Println(podList) + } + + return pods, nil +} diff --git a/tests/acceptance/shared/util/constants.go b/tests/acceptance/shared/util/constants.go new file mode 100644 index 00000000000..7b2f6965c72 --- /dev/null +++ b/tests/acceptance/shared/util/constants.go @@ -0,0 +1,71 @@ +package 
util + +import ( + "flag" +) + +// global configurations and flags +var ( + Destroy = flag.Bool("destroy", false, "a bool") + + KubeConfigFile string + ServerIPs string + AgentIPs string + NumServers int + NumAgents int + AwsUser string + AccessKey string +) + +// global cmds +var ( + GrepNoExec = " | grep -v NoSchedule | grep -v NoExecute" + GetAll = "kubectl get all -A --kubeconfig=" + GetNodesWide = "kubectl get nodes --no-headers -o wide --kubeconfig=" + GetPodsWide = "kubectl get pods -o wide --no-headers -A --kubeconfig=" + GetNodesExternalIp = "kubectl get nodes --output=jsonpath='{.items[*].status.addresses[?(@.type==\"ExternalIP\")].address}' --kubeconfig=" + ExecDnsUtils = "kubectl exec -n auto-dns -t dnsutils --kubeconfig=" + GetDnsUtils = "kubectl get pods -n auto-dns dnsutils --kubeconfig=" + GetNodePortSVC = "kubectl get service -n auto-nodeport nginx-nodeport-svc --output jsonpath={.spec.ports[0].nodePort} --kubeconfig=" + GetPodsSystemUpgrade = "kubectl get pods -n system-upgrade --kubeconfig=" + GetIngressRunning = "kubectl get pods -n auto-ingress -l k8s-app=nginx-app-ingress --field-selector=status.phase=Running --kubeconfig=" + GetClusterIp = "kubectl get pods -n auto-clusterip -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + GetCoreDNSdeployImage = "kubectl get deploy rke2-coredns-rke2-coredns -n kube-system -o jsonpath='{.spec.template.spec.containers[?(@.name==\"coredns\")].image}'" + + GetWorkerNodes = "kubectl get node -o jsonpath='{range .items[*]}{@.metadata.name} " + + "{@.status.conditions[-1].type} " + + "{@.status.nodeInfo.kubeletVersion} " + + "{@.status.addresses[?(@.type==\"InternalIP\")].address} " + + "{@.status.addresses[?(@.type==\"ExternalIP\")].address} " + + "{@.spec.taints[*].effect}{\"\\n\"}{end}' " + + "--kubeconfig=" + HelmListCoreDns = "helm list --all-namespaces | grep rke2-coredns" +) + +// names, asserts and cmds +const ( + RestartRKE2 = "sudo systemctl restart rke2-*" + 
NginxAppClusterIp = "nginx-app-clusterip" + TestClusterip = "test-clusterip" + NginxClusterIpSVC = "nginx-clusterip-svc" + ClusterIpNamespace = "auto-clusterip" + NginxAppNodePortSVC = "nginx-app-nodeport" + TestNodePort = "test-nodeport" + NodePortNamespace = "auto-nodeport" + TestDaemonset = "test-daemonset" + NginxAppIngress = "nginx-app-ingress" + TestIngress = "test-ingress" + IngressNamespace = "auto-ingress" + DnsUtils = "dnsutils" + Nslookup = "kubernetes.default.svc.cluster.local" + SUC = "system-upgrade-controller" + RunningAssert = "Running" + CompletedAssert = "Completed" + InstallHelm = "curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash" + InstallRke2Server = "sudo curl -sfL https://get.rke2.io | sudo %s INSTALL_RKE2_TYPE=server sh - " + InstallRke2Agent = "sudo curl -sfL https://get.rke2.io | sudo %s INSTALL_RKE2_TYPE=agent sh - " + GetRuncVersion = "(find /var/lib/rancher/rke2/data/ -type f -name runc -exec {} --version \\;)" + GetVersion = "rke2 --version" + ModulesPath = "/modules" + TfVarsPath = "/modules/config/local.tfvars" +) diff --git a/tests/terraform/.gitignore b/tests/terraform/.gitignore deleted file mode 100644 index 493038c7c06..00000000000 --- a/tests/terraform/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -.ssh/ -*.tfstate* -.terraform* -config/ diff --git a/tests/terraform/Makefile b/tests/terraform/Makefile deleted file mode 100644 index d2583119804..00000000000 --- a/tests/terraform/Makefile +++ /dev/null @@ -1,75 +0,0 @@ -##========================= Terraform Tests =========================# -include ./config.mk - - -TAGNAME ?= default -tf-up: - @cd ../.. && docker build . 
-q -f ./tests/terraform/scripts/Dockerfile.build -t rke2-tf-${TAGNAME} - -.PHONY: tf-run -tf-run: - @docker run -d --name rke2-tf-test-${IMGNAME} -t \ - -e AWS_ACCESS_KEY_ID="${AWS_ACCESS_KEY_ID}" \ - -e AWS_SECRET_ACCESS_KEY="${AWS_SECRET_ACCESS_KEY}" \ - -v ${ACCESS_KEY_LOCAL}:/go/src/github.com/rancher/rke2/tests/terraform/modules/config/.ssh/aws_key.pem \ - rke2-tf-${TAGNAME} sh -c 'cd ./tests/terraform ; \ - if [ -n "${ARGNAME}" ]; then \ - go test -v -timeout=45m \ - ./${TESTDIR}/... \ - -"${ARGNAME}"="${ARGVALUE}"; \ - elif [ -z "${TESTDIR}" ]; then \ - go test -v -timeout=40m \ - ./createcluster/...; \ - else \ - go test -v -timeout=45m \ - ./${TESTDIR}/...; \ - fi' - -.PHONY: tf-logs -tf-logs: - @docker logs -f rke2-tf-test-${IMGNAME} - -.PHONY: tf-down -tf-down: - @echo "Removing containers and images" - @docker stop $$(docker ps -a -q --filter="name=rke2-tf*") - @docker rm $$(docker ps -a -q --filter="name=rke2-tf*") - @docker rmi $$(docker images -q --filter="reference=rke2-tf*") - -.PHONY: tf-clean -tf-clean: - @./scripts/delete_resources.sh - -.PHONY: tf-complete -tf-tests-complete: tf-clean tf-down tf-remove-state tf-up tf-run - - -#========================= Run terraform tests locally =========================# - -.PHONY: tf-create -tf-create: - @go test -timeout=45m -v ./createcluster/... - -.PHONY: tf-upgrade -tf-upgrade: - @go test -timeout=45m -v ./upgradecluster/... 
-${ARGNAME}=${ARGVALUE} - -.PHONY: tf-remove-state -tf-remove-state: - @rm -rf ./modules/.terraform - @rm -rf ./modules/.terraform.lock.hcl ./modules/terraform.tfstate ./modules/terraform.tfstate.backup - -.PHONY: tf-test-suite -tf-test-suite: - @make tf-remove-state && make tf-create ; sleep 5 && \ - make tf-remove-state && make tf-upgrade ${ARGNAME}=${ARGVALUE} - -.PHONY: tf-test-suite-same-cluster -tf-test-suite-same-cluster: - @make tf-create ; sleep 5 && make tf-upgrade ${ARGNAME}=${ARGVALUE} - -#========================= TestCode Static Quality Check =========================# -.PHONY: vet-lint ## Run locally only inside Tests package -vet-lint: - @echo "Running go vet and lint" - @go vet ./${TESTDIR} && golangci-lint run --tests \ No newline at end of file diff --git a/tests/terraform/README.md b/tests/terraform/README.md deleted file mode 100644 index ad62079bcc5..00000000000 --- a/tests/terraform/README.md +++ /dev/null @@ -1,107 +0,0 @@ -# Terraform (TF) Tests - -Terraform (TF) tests are an additional form of End-to-End (E2E) tests that cover multi-node RKE2 configuration and administration: install, update, teardown, etc. across a wide range of operating systems. Terraform tests are used as part of RKE2 quality assurance (QA) to bring up clusters with different configurations on demand, perform specific functionality tests, and keep them up and running to perform some exploratory tests in real-world scenarios. - -## Framework -TF tests utilize [Ginkgo](https://onsi.github.io/ginkgo/) and [Gomega](https://onsi.github.io/gomega/) like the e2e tests. They rely on [Terraform](https://www.terraform.io/) to provide the underlying cluster configuration. - -## Format - -- All TF tests should be placed under `tests/terraform/`. -- All TF test functions should be named: `Test_TF`. - -See the [create cluster test](../tests/terraform/createcluster_test.go) as an example. 
- -## Running - -- Before running the tests, it's required to create a tfvars file in `./tests/terraform/modules/config/local.tfvars`. This should be filled in to match the desired variables, including those relevant for your AWS environment. All variables that are necessary can be seen in [main.tf](../tests/terraform/modules/main.tf). -It is also required to have standard AWS environment variables present: `AWS_ACCESS_KEY_ID` , `AWS_SECRET_ACCESS_KEY` and `ACCESS_KEY_LOCAL` - - -- The local.tfvars split roles section should be strictly followed to not cause any false positives or negatives on tests - - -- Please also when creating tf var resource_name, make sure that you do not have any instances from other automations with the same name to avoid deleting wrong resources - - -- If you want to run tests locally totally in parallel, please make sure that you have different resource_name for each test - -*** - -Tests can be run per package with: -```bash -go test -timeout=30m -v ./tests/terraform/$PACKAGE_NAME/... -``` -Additionally, you can use docker to run the tests, which may be beneficial when wanting to run multiple tests in parallel. Just be sure to change the resource name in the tfvars file to ensure there won't be overwrites! Provided example below is for running two separate packages using docker: -```bash -$ docker build . -f ./tests/terraform/scripts/Dockerfile.build -t rke2-tf -# These next commands assume you have the following environment variable in your config/local.tfvars: 'access_key = "/tmp/aws_key.pem"' -$ docker run --name rke2-tf-creation-test -t -e AWS_ACCESS_KEY_ID= -e AWS_SECRET_ACCESS_KEY= -v /path/to/aws/key.pem:/tmp/aws_key.pem rke2-tf sh -c "go test -timeout=30m -v ./tests/terraform/createcluster/..." -$ docker run --name rke2-tf-upgrade-test -t -e AWS_ACCESS_KEY_ID= -e AWS_SECRET_ACCESS_KEY= -v /path/to/aws/key.pem:/tmp/aws_key.pem rke2-tf sh -c "go test -timeout=45m -v ./tests/terraform/upgradecluster/... 
-upgradeVersion=v1.24.8+rke2r1" -``` -Test Flags: -``` -- ${upgradeVersion} version to upgrade to -``` -We can also run tests through the Makefile through ./test/terraform directory: - -- On the first run with make and docker please delete your .terraform folder, terraform.tfstate and terraform.hcl.lock file -```bash -Args: -*All args are optional and can be used with: - -`$make tf-run` `$make tf-logs`, -`$make vet-lint` `$make tf-complete`, -`$make tf-upgrade` `$make tf-test-suite-same-cluster`, -`$make tf-test-suite` - -- ${IMGNAME} append any string to the end of image name -- ${TAGNAME} append any string to the end of tag name -- ${ARGNAME} name of the arg to pass to the test -- ${ARGVALUE} value of the arg to pass to the test -- ${TESTDIR} path to the test directory - -Commands: -$ make tf-up # create the image from Dockerfile.build -$ make tf-run # runs all tests if no flags or args provided -$ make tf-down # removes the image -$ make tf-clean # removes instances and resources created by tests -$ make tf-logs # prints logs from container the tests -$ make tf--complete # clean resources + remove images + run tests -$ make tf-create # runs create cluster test locally -$ make tf-upgrade # runs upgrade cluster test locally -$ make tf-test-suite-same-cluster # runs all tests locally in sequence using the same state -$ make tf-remove-state # removes terraform state dir and files -$ make tf-test-suite # runs all tests locally in sequence not using the same state -$ make vet-lint # runs go vet and go lint - - -Examples: -$ make tf-up TAGNAME=ubuntu -$ make tf-run IMGNAME=2 TAGNAME=ubuntu TESTDIR=upgradecluster ARGNAME=upgradeVersion ARGVALUE=v1.26.2+rke2r1 -$ make tf-run TESTDIR=createcluster -$ make tf-logs IMGNAME=1 -$ make vet-lint TESTDIR=upgradecluster -``` - -# Running tests in parallel: -- You can play around and have a lot of different test combinations like: -``` -- Build docker image with different TAGNAME="OS`s" + with different configurations( 
resource_name, node_os, versions, install type, nodes and etc) and have unique "IMGNAMES" -- And in the meanwhile run also locally with different configuration while your dockers TAGNAME and IMGNAMES are running -``` - -# In between tests: -- If you want to run with same cluster do not delete ./tests/terraform/modules/terraform.tfstate + .terraform.lock.hcl file after each test. - -- If you want to use new resources then make sure to delete the ./tests/terraform/modules/terraform.tfstate + .terraform.lock.hcl file if you want to create a new cluster. - - -# Common Issues: - -- Issues related to terraform plugin please also delete the modules/.terraform folder -- In mac m1 maybe you need also to go to rke2/tests/terraform/modules and run `terraform init` to download the plugins - - -# Debugging -To focus individual runs on specific test clauses, you can prefix with `F`. For example, in the [create cluster test](../tests/terraform/createcluster_test.go), you can update the initial creation to be: `FIt("Starts up with no issues", func() {` in order to focus the run on only that clause. 
diff --git a/tests/terraform/createcluster/createcluster.go b/tests/terraform/createcluster/createcluster.go deleted file mode 100644 index 7eb4e303526..00000000000 --- a/tests/terraform/createcluster/createcluster.go +++ /dev/null @@ -1,92 +0,0 @@ -package createcluster - -import ( - "fmt" - - "path/filepath" - "strconv" - "testing" - - "github.com/gruntwork-io/terratest/modules/terraform" - tf "github.com/rancher/rke2/tests/terraform" -) - -var ( - KubeConfigFile string - MasterIPs string - WorkerIPs string - NumServers int - NumWorkers int - AwsUser string - AccessKey string - modulesPath = "/tests/terraform/modules" - tfVarsPath = "/tests/terraform/modules/config/local.tfvars" -) - -func BuildCluster(t *testing.T, destroy bool) (string, error) { - tfDir, err := filepath.Abs(tf.Basepath() + modulesPath) - if err != nil { - return "", err - } - - varDir, err := filepath.Abs(tf.Basepath() + tfVarsPath) - if err != nil { - return "", err - } - terraformOptions := terraform.Options{ - TerraformDir: tfDir, - VarFiles: []string{varDir}, - } - - NumServers, err = strconv.Atoi(terraform.GetVariableAsStringFromVarFile(t, varDir, "no_of_server_nodes")) - if err != nil { - return "", err - } - NumWorkers, err = strconv.Atoi(terraform.GetVariableAsStringFromVarFile(t, varDir, "no_of_worker_nodes")) - if err != nil { - return "", err - } - - splitRoles := terraform.GetVariableAsStringFromVarFile(t, varDir, "split_roles") - if splitRoles == "true" { - etcdNodes, err := strconv.Atoi(terraform.GetVariableAsStringFromVarFile(t, varDir, "etcd_only_nodes")) - if err != nil { - return "", err - } - etcdCpNodes, err := strconv.Atoi(terraform.GetVariableAsStringFromVarFile(t, varDir, "etcd_cp_nodes")) - if err != nil { - return "", err - } - etcdWorkerNodes, err := strconv.Atoi(terraform.GetVariableAsStringFromVarFile(t, varDir, "etcd_worker_nodes")) - if err != nil { - return "", err - } - cpNodes, err := strconv.Atoi(terraform.GetVariableAsStringFromVarFile(t, varDir, 
"cp_only_nodes")) - if err != nil { - return "", err - } - cpWorkerNodes, err := strconv.Atoi(terraform.GetVariableAsStringFromVarFile(t, varDir, "cp_worker_nodes")) - if err != nil { - return "", err - } - NumServers = NumServers + etcdNodes + etcdCpNodes + etcdWorkerNodes + cpNodes + cpWorkerNodes - } - - AwsUser = terraform.GetVariableAsStringFromVarFile(t, varDir, "aws_user") - AccessKey = terraform.GetVariableAsStringFromVarFile(t, varDir, "access_key") - - if destroy { - fmt.Printf("Cluster is being deleted") - terraform.Destroy(t, &terraformOptions) - return "cluster destroyed", nil - } - - fmt.Printf("Creating Cluster") - - terraform.InitAndApply(t, &terraformOptions) - KubeConfigFile = terraform.Output(t, &terraformOptions, "kubeconfig") - MasterIPs = terraform.Output(t, &terraformOptions, "master_ips") - WorkerIPs = terraform.Output(t, &terraformOptions, "worker_ips") - - return "cluster created", nil -} diff --git a/tests/terraform/createcluster/createcluster_test.go b/tests/terraform/createcluster/createcluster_test.go deleted file mode 100644 index 62fcfb982c3..00000000000 --- a/tests/terraform/createcluster/createcluster_test.go +++ /dev/null @@ -1,259 +0,0 @@ -package createcluster - -import ( - "flag" - "fmt" - "regexp" - "strings" - "testing" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - - "github.com/rancher/rke2/tests/terraform" -) - -var destroy = flag.Bool("destroy", false, "a bool") - -func Test_TFClusterCreateValidation(t *testing.T) { - RegisterFailHandler(Fail) - flag.Parse() - - RunSpecs(t, "Create Cluster Test Suite") -} - -var _ = Describe("Test:", func() { - Context("Build Cluster:", func() { - It("Starts up with no issues", func() { - status, err := BuildCluster(&testing.T{}, false) - Expect(err).NotTo(HaveOccurred()) - Expect(status).To(Equal("cluster created")) - - defer GinkgoRecover() - - fmt.Println("Server Node IPS:", MasterIPs) - fmt.Println("Agent Node IPS:", WorkerIPs) - terraform.PrintFileContents(KubeConfigFile) - - Expect(KubeConfigFile).ShouldNot(BeEmpty()) - Expect(MasterIPs).ShouldNot(BeEmpty()) - - if NumWorkers > 0 { - Expect(WorkerIPs).ShouldNot(BeEmpty()) - } else { - Expect(WorkerIPs).Should(BeEmpty()) - } - Expect(KubeConfigFile).ShouldNot(BeEmpty()) - }) - - fmt.Printf("\nFetching node status\n") - It("Checks Node and Pod Status", func() { - defer func() { - _, err := terraform.Nodes(KubeConfigFile, true) - if err != nil { - fmt.Println("Error retrieving nodes: ", err) - } - _, err = terraform.Pods(KubeConfigFile, true) - if err != nil { - fmt.Println("Error retrieving pods: ", err) - } - }() - - expectedNodeCount := NumServers + NumWorkers - Eventually(func(g Gomega) { - nodes, err := terraform.Nodes(KubeConfigFile, false) - - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(len(nodes)).To(Equal(expectedNodeCount), - "Number of nodes should match the spec") - - for _, node := range nodes { - g.Expect(node.Status).Should(Equal("Ready"), - "Nodes should all be in Ready state") - } - }, "420s", "5s").Should(Succeed()) - - fmt.Printf("\nFetching pod status\n") - Eventually(func(g Gomega) { - pods, err := terraform.Pods(KubeConfigFile, false) - - g.Expect(err).NotTo(HaveOccurred()) - - re := regexp.MustCompile("[0-9]+") - for _, pod := range pods { - if strings.Contains(pod.Name, 
"helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - g.Expect(pod.Restarts).Should(Equal("0"), pod.Name) - numRunning := re.FindAllString(pod.Ready, 2) - g.Expect(numRunning[0]).Should(Equal(numRunning[1]), pod.Name, - "should have all containers running") - } - } - }, "600s", "5s").Should(Succeed()) - }) - - It("Verifies ClusterIP Service", func() { - namespace := "auto-clusterip" - _, err := terraform.DeployWorkload("clusterip.yaml", KubeConfigFile) - Expect(err).NotTo(HaveOccurred(), "Cluster IP manifest not deployed") - - defer terraform.RemoveWorkload("clusterip.yaml", KubeConfigFile) - - Eventually(func(g Gomega) { - cmd := "kubectl get pods -n " + namespace + " -o=name -l k8s-app=nginx-app-clusterip " + - "--field-selector=status.phase=Running --kubeconfig=" + KubeConfigFile - res, err := terraform.RunCommand(cmd) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-clusterip")) - }, "420s", "5s").Should(Succeed()) - - clusterip, port, _ := terraform.FetchClusterIP(KubeConfigFile, namespace, - "nginx-clusterip-svc") - - cmd := "curl -sL --insecure http://" + clusterip + ":" + port + "/name.html" - nodeExternalIP := terraform.FetchNodeExternalIP(KubeConfigFile) - - for _, ip := range nodeExternalIP { - Eventually(func(g Gomega) { - res, err := terraform.RunCommandOnNode(cmd, ip, AwsUser, AccessKey) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-clusterip")) - }, "420s", "10s").Should(Succeed()) - } - }) - - It("Verifies NodePort Service", func() { - namespace := "auto-nodeport" - _, err := terraform.DeployWorkload("nodeport.yaml", KubeConfigFile) - Expect(err).NotTo(HaveOccurred(), "NodePort manifest not deployed") - - defer terraform.RemoveWorkload("nodeport.yaml", KubeConfigFile) - - nodeExternalIP := terraform.FetchNodeExternalIP(KubeConfigFile) - cmd := "kubectl get service -n " + namespace + " 
nginx-nodeport-svc --kubeconfig=" + - KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\"" - - nodeport, err := terraform.RunCommand(cmd) - Expect(err).NotTo(HaveOccurred()) - - for _, ip := range nodeExternalIP { - Eventually(func(g Gomega) { - cmd := "kubectl get pods -n " + namespace + " -o=name -l k8s-app=nginx-app-nodeport " + - "--field-selector=status.phase=Running --kubeconfig=" + KubeConfigFile - res, err := terraform.RunCommand(cmd) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-nodeport")) - }, "240s", "5s").Should(Succeed()) - - cmd = "curl -sL --insecure http://" + ip + ":" + nodeport + "/name.html" - Eventually(func(g Gomega) { - res, err := terraform.RunCommand(cmd) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-nodeport")) - }, "240s", "5s").Should(Succeed()) - } - }) - - It("Verifies Ingress", func() { - namespace := "auto-ingress" - _, err := terraform.DeployWorkload("ingress.yaml", KubeConfigFile) - Expect(err).NotTo(HaveOccurred(), "Ingress manifest not deployed") - - defer terraform.RemoveWorkload("ingress.yaml", KubeConfigFile) - - Eventually(func(g Gomega) { - cmd := "kubectl get pods -n " + namespace + " -o=name -l k8s-app=nginx-app-ingress" + - " --field-selector=status.phase=Running --kubeconfig=" + KubeConfigFile - res, err := terraform.RunCommand(cmd) - - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-ingress")) - }, "240s", "5s").Should(Succeed()) - - var ingressIps []string - nodes, err := terraform.WorkerNodes(KubeConfigFile, false) - if err != nil { - fmt.Println("Error retrieving nodes: ", err) - } - - Eventually(func(g Gomega) { - ingressIps, err = terraform.FetchIngressIP(namespace, KubeConfigFile) - - g.Expect(err).NotTo(HaveOccurred(), "Ingress ip is not returned") - g.Expect(len(ingressIps)).To(Equal(len(nodes)), - "Number of ingress IPs should match the number of nodes") - }, "240s", 
"5s").Should(Succeed()) - - for _, ip := range ingressIps { - cmd := "curl -s --header host:foo1.bar.com" + " http://" + ip + "/name.html" - Eventually(func(g Gomega) { - res, err := terraform.RunCommand(cmd) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-ingress")) - }, "240s", "5s").Should(Succeed()) - } - }) - - It("Verifies Daemonset", func() { - _, err := terraform.DeployWorkload("daemonset.yaml", KubeConfigFile) - Expect(err).NotTo(HaveOccurred(), "Daemonset manifest not deployed") - - defer terraform.RemoveWorkload("daemonset.yaml", KubeConfigFile) - - nodes, _ := terraform.WorkerNodes(KubeConfigFile, false) - pods, _ := terraform.Pods(KubeConfigFile, false) - - Eventually(func(g Gomega) { - count := terraform.CountOfStringInSlice("test-daemonset", pods) - g.Expect(count).Should(Equal(len(nodes)), - "Daemonset pod count does not match node count") - }, "420s", "10s").Should(Succeed()) - }) - - It("Verifies dns access", func() { - namespace := "auto-dns" - _, err := terraform.DeployWorkload("dnsutils.yaml", KubeConfigFile) - Expect(err).NotTo(HaveOccurred(), "dnsutils manifest not deployed") - - defer terraform.RemoveWorkload("dnsutils.yaml", KubeConfigFile) - - Eventually(func(g Gomega) { - cmd := "kubectl get pods dnsutils " + "-n " + namespace + " --kubeconfig=" + KubeConfigFile - res, _ := terraform.RunCommand(cmd) - g.Expect(res).Should(ContainSubstring("dnsutils")) - g.Expect(res).Should(ContainSubstring("Running")) - }, "420s", "2s").Should(Succeed()) - - Eventually(func(g Gomega) { - cmd := "kubectl -n " + namespace + " --kubeconfig=" + - KubeConfigFile + " exec -t dnsutils -- nslookup kubernetes.default" - res, _ := terraform.RunCommand(cmd) - g.Expect(res).Should(ContainSubstring("kubernetes.default.svc.cluster.local")) - }, "420s", "2s").Should(Succeed()) - }) - }) -}) - -var _ = BeforeEach(func() { - if *destroy { - Skip("Cluster is being Deleted") - } -}) - -var _ = AfterEach(func() { - if 
CurrentSpecReport().Failed() { - fmt.Printf("\nFAILED! %s\n", CurrentSpecReport().FullText()) - } else { - fmt.Printf("\nPASSED! %s\n", CurrentSpecReport().FullText()) - } -}) - -var _ = AfterSuite(func() { - if *destroy { - status, err := BuildCluster(&testing.T{}, *destroy) - Expect(err).NotTo(HaveOccurred()) - Expect(status).To(Equal("cluster destroyed")) - } -}) diff --git a/tests/terraform/manual_upgrade/upgradecluster_test.go b/tests/terraform/manual_upgrade/upgradecluster_test.go deleted file mode 100644 index 3c15819fc74..00000000000 --- a/tests/terraform/manual_upgrade/upgradecluster_test.go +++ /dev/null @@ -1,387 +0,0 @@ -package manual_upgrade - -import ( - "flag" - "fmt" - "regexp" - "strings" - "testing" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - - "github.com/rancher/rke2/tests/terraform" - "github.com/rancher/rke2/tests/terraform/createcluster" -) - -var tfVars = flag.String("tfvars", "/tests/terraform/modules/config/local.tfvars", "custom .tfvars file from base project path") -var destroy = flag.Bool("destroy", false, "a bool") -var upgradeVersion = flag.String("upgradeVersion", "", "Version to upgrade the cluster to") -var channel = flag.String("channel", "", "Channel to use inside the installing command") -var failed bool - -func Test_TFUpgradeClusterValidation(t *testing.T) { - RegisterFailHandler(Fail) - flag.Parse() - - RunSpecs(t, "Upgrade Cluster Test Suite") -} - -var _ = Describe("Upgrade Tests:", func() { - Context("Build Cluster:", func() { - It("Starts up with no issues", func() { - status, err := createcluster.BuildCluster(&testing.T{}, false) - Expect(err).NotTo(HaveOccurred()) - Expect(status).To(Equal("cluster created")) - defer GinkgoRecover() - fmt.Println("Server Node IPS:", createcluster.MasterIPs) - fmt.Println("Agent Node IPS:", createcluster.WorkerIPs) - terraform.PrintFileContents(createcluster.KubeConfigFile) - Expect(createcluster.MasterIPs).ShouldNot(BeEmpty()) - if createcluster.NumWorkers > 0 { - 
Expect(createcluster.WorkerIPs).ShouldNot(BeEmpty()) - } else { - Expect(createcluster.WorkerIPs).Should(BeEmpty()) - } - Expect(createcluster.KubeConfigFile).ShouldNot(BeEmpty()) - }) - - It("Checks Node and Pod Status", func() { - defer func() { - _, err := terraform.Nodes(createcluster.KubeConfigFile, true) - if err != nil { - fmt.Println("Error retrieving nodes preupgrade: ", err) - } - _, err = terraform.Pods(createcluster.KubeConfigFile, true) - if err != nil { - fmt.Println("Error retrieving pods preupgrade: ", err) - } - }() - - fmt.Printf("\nFetching node status preupgrade\n") - expectedNodeCount := createcluster.NumServers + createcluster.NumWorkers - Eventually(func(g Gomega) { - nodes, err := terraform.Nodes(createcluster.KubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(len(nodes)).To(Equal(expectedNodeCount), "Number of nodes should match the spec") - for _, node := range nodes { - g.Expect(node.Status).Should(Equal("Ready"), "Nodes should all be in Ready state") - } - }, "420s", "5s").Should(Succeed()) - - re := regexp.MustCompile("[0-9]+") - fmt.Printf("\nFetching pod status preupgrade\n") - Eventually(func(g Gomega) { - pods, err := terraform.Pods(createcluster.KubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - g.Expect(pod.Restarts).Should(Equal("0"), pod.Name) - numRunning := re.FindAllString(pod.Ready, 2) - g.Expect(numRunning[0]).Should(Equal(numRunning[1]), pod.Name, "should have all containers running") - } - } - }, "600s", "5s").Should(Succeed()) - }) - }) - - Context("Preupgrade Validations:", func() { - It("Verifies ClusterIP Service Preupgrade", func() { - namespace := "auto-clusterip" - _, err := terraform.DeployWorkload("clusterip.yaml", createcluster.KubeConfigFile) - 
Expect(err).NotTo(HaveOccurred(), "Cluster IP manifest not deployed") - - Eventually(func(g Gomega) { - res, err := terraform.IsAppRunning(namespace, "nginx-app-clusterip", createcluster.KubeConfigFile) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should((ContainSubstring("test-clusterip"))) - }, "420s", "5s").Should(Succeed()) - - clusterip, port, _ := terraform.FetchClusterIP(createcluster.KubeConfigFile, namespace, "nginx-clusterip-svc") - cmd := "curl -sL --insecure http://" + clusterip + ":" + port + "/name.html" - nodeExternalIP := terraform.FetchNodeExternalIP(createcluster.KubeConfigFile) - for _, ip := range nodeExternalIP { - Eventually(func(g Gomega) { - res, err := terraform.RunCommandOnNode(cmd, ip, createcluster.AwsUser, createcluster.AccessKey) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-clusterip")) - }, "420s", "10s").Should(Succeed()) - } - }) - - It("Verifies NodePort Service Preupgrade", func() { - namespace := "auto-nodeport" - _, err := terraform.DeployWorkload("nodeport.yaml", createcluster.KubeConfigFile) - Expect(err).NotTo(HaveOccurred(), "NodePort manifest not deployed") - - nodeExternalIP := terraform.FetchNodeExternalIP(createcluster.KubeConfigFile) - cmd := "kubectl get service -n " + namespace + " nginx-nodeport-svc --kubeconfig=" + createcluster.KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\"" - nodeport, err := terraform.RunCommand(cmd) - Expect(err).NotTo(HaveOccurred()) - - for _, ip := range nodeExternalIP { - Eventually(func(g Gomega) { - res, err := terraform.IsAppRunning(namespace, "nginx-app-nodeport", createcluster.KubeConfigFile) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-nodeport")) - }, "240s", "5s").Should(Succeed()) - - cmd = "curl -sL --insecure http://" + ip + ":" + nodeport + "/name.html" - Eventually(func(g Gomega) { - res, err := terraform.RunCommand(cmd) - g.Expect(err).NotTo(HaveOccurred()) - 
g.Expect(res).Should(ContainSubstring("test-nodeport")) - }, "240s", "5s").Should(Succeed()) - } - }) - - It("Verifies Ingress Preupgrade", func() { - namespace := "auto-ingress" - _, err := terraform.DeployWorkload("ingress.yaml", createcluster.KubeConfigFile) - Expect(err).NotTo(HaveOccurred(), "Ingress manifest not deployed") - - Eventually(func(g Gomega) { - res, err := terraform.IsAppRunning(namespace, "nginx-app-ingress", createcluster.KubeConfigFile) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-ingress")) - }, "240s", "5s").Should(Succeed()) - - var ingressIps []string - nodes, err := terraform.WorkerNodes(createcluster.KubeConfigFile, false) - if err != nil { - fmt.Println("Error retrieving nodes: ", err) - } - Eventually(func(g Gomega) { - ingressIps, err = terraform.FetchIngressIP(namespace, createcluster.KubeConfigFile) - g.Expect(err).NotTo(HaveOccurred(), "Ingress ip is not returned") - g.Expect(len(ingressIps)).To(Equal(len(nodes)), "Number of ingress IPs should match the number of nodes") - }, "240s", "5s").Should(Succeed()) - - for _, ip := range ingressIps { - cmd := "curl -s --header host:foo1.bar.com" + " http://" + ip + "/name.html" - Eventually(func(g Gomega) { - res, err := terraform.RunCommand(cmd) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-ingress")) - }, "240s", "5s").Should(Succeed()) - } - }) - - It("Verifies Daemonset Preupgrade", func() { - _, err := terraform.DeployWorkload("daemonset.yaml", createcluster.KubeConfigFile) - Expect(err).NotTo(HaveOccurred(), "Daemonset manifest not deployed") - - nodes, _ := terraform.WorkerNodes(createcluster.KubeConfigFile, false) - pods, _ := terraform.Pods(createcluster.KubeConfigFile, false) - - Eventually(func(g Gomega) { - count := terraform.CountOfStringInSlice("test-daemonset", pods) - g.Expect(count).Should((Equal(len(nodes))), "Daemonset pod count does not match node count") - }, "420s", "10s").Should(Succeed()) - 
}) - - It("Verifies DNS Access Preupgrade", func() { - namespace := "auto-dns" - _, err := terraform.DeployWorkload("dnsutils.yaml", createcluster.KubeConfigFile) - Expect(err).NotTo(HaveOccurred(), "dnsutils manifest not deployed") - - Eventually(func(g Gomega) { - cmd := "kubectl get pods dnsutils " + "-n " + namespace + " --kubeconfig=" + createcluster.KubeConfigFile - res, _ := terraform.RunCommand(cmd) - g.Expect(res).Should(ContainSubstring("dnsutils")) - g.Expect(res).Should(ContainSubstring("Running")) - }, "420s", "2s").Should(Succeed()) - - Eventually(func(g Gomega) { - cmd := "kubectl -n " + namespace + " --kubeconfig=" + createcluster.KubeConfigFile + " exec -t dnsutils -- nslookup kubernetes.default" - res, _ := terraform.RunCommand(cmd) - g.Expect(res).Should(ContainSubstring("kubernetes.default.svc.cluster.local")) - }, "420s", "2s").Should(Succeed()) - }) - }) - - Context("Upgrade nodes via manual:", func() { - It("Upgrade nodes", func() { - var cmdUpgradeVersion string - if *channel != "" { - // If a channel is specified, upgrade using that channel - cmdUpgradeVersion = "sudo curl -sfL https://get.rke2.io | sudo INSTALL_RKE2_VERSION=" + *upgradeVersion + " INSTALL_RKE2_CHANNEL=" + *channel + " sh -" - } else { - // If no channel is specified, upgrade using the version specified - cmdUpgradeVersion = "sudo curl -sfL https://get.rke2.io | sudo INSTALL_RKE2_VERSION=" + *upgradeVersion + " sh -" - } - - versionRegex := regexp.MustCompile("-rc[0-9]+") - k8sVersion := versionRegex.ReplaceAllString(*upgradeVersion, "") - re := regexp.MustCompile("[0-9]+") - - nodes, err := terraform.Nodes(createcluster.KubeConfigFile, false) - if err != nil { - fmt.Println("Error retrieving nodes: ", err) - } - - for _, node := range nodes { - // Every node will be upgraded and restarted - - Eventually(func(g Gomega) { - fmt.Println("\nUpgrading ", node.Roles, node.ExternalIP) - terraform.RunCommandOnNode(cmdUpgradeVersion, node.ExternalIP, createcluster.AwsUser, 
createcluster.AccessKey) - }, "120s", "2s").Should(Succeed()) - - Eventually(func(g Gomega) { - fmt.Println("Restarting node: ", node.Roles, node.ExternalIP) - terraform.RunCommandOnNode("sudo systemctl restart rke2-*", node.ExternalIP, createcluster.AwsUser, createcluster.AccessKey) - }, "240s", "2s").Should(Succeed()) - } - - //Fetch node status & version PostUpgrade - Eventually(func(g Gomega) { - expectedNodeCount := createcluster.NumServers + createcluster.NumWorkers - nodes, err := terraform.Nodes(createcluster.KubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(len(nodes)).To(Equal(expectedNodeCount), "Number of nodes should match the spec") - for _, node := range nodes { - g.Expect(node.Status).Should(Equal("Ready"), "Nodes should all be in Ready state after upgrading") - g.Expect(node.Version).Should(Equal(k8sVersion), "Nodes should all be upgraded to the specified version") - } - }, "900s", "30s").Should(Succeed()) - - //Fetch pods status PostUpgrade - Eventually(func(g Gomega) { - pods, err := terraform.Pods(createcluster.KubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - numRunning := re.FindAllString(pod.Ready, 2) - g.Expect(numRunning[0]).Should(Equal(numRunning[1]), pod.Name, "should have all containers running") - } - } - }, "600s", "5s").Should(Succeed()) - }) - }) - - Context("Postupgrade Validations:", func() { - It("Verifies ClusterIP Service Postupgrade", func() { - namespace := "auto-clusterip" - defer terraform.RemoveWorkload("clusterip.yaml", createcluster.KubeConfigFile) - - res, err := terraform.IsAppRunning(namespace, "nginx-app-clusterip", createcluster.KubeConfigFile) - Expect(err).NotTo(HaveOccurred()) - Expect(res).Should((ContainSubstring("test-clusterip"))) - - clusterip, port, _ := 
terraform.FetchClusterIP(createcluster.KubeConfigFile, namespace, "nginx-clusterip-svc") - cmd := "curl -sL --insecure http://" + clusterip + ":" + port + "/name.html" - nodeExternalIP := terraform.FetchNodeExternalIP(createcluster.KubeConfigFile) - for _, ip := range nodeExternalIP { - Eventually(func(g Gomega) { - res, err := terraform.RunCommandOnNode(cmd, ip, createcluster.AwsUser, createcluster.AccessKey) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-clusterip")) - }, "120s", "10s").Should(Succeed()) - } - }) - - It("Verifies NodePort Service Postupgrade", func() { - namespace := "auto-nodeport" - defer terraform.RemoveWorkload("nodeport.yaml", createcluster.KubeConfigFile) - - nodeExternalIP := terraform.FetchNodeExternalIP(createcluster.KubeConfigFile) - cmd := "kubectl get service -n " + namespace + " nginx-nodeport-svc --kubeconfig=" + createcluster.KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\"" - nodeport, err := terraform.RunCommand(cmd) - Expect(err).NotTo(HaveOccurred()) - - for _, ip := range nodeExternalIP { - Eventually(func(g Gomega) { - res, err := terraform.IsAppRunning(namespace, "nginx-app-nodeport", createcluster.KubeConfigFile) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-nodeport")) - }, "120s", "5s").Should(Succeed()) - - cmd = "curl -sL --insecure http://" + ip + ":" + nodeport + "/name.html" - Eventually(func(g Gomega) { - res, err := terraform.RunCommand(cmd) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-nodeport")) - }, "120s", "5s").Should(Succeed()) - } - }) - - It("Verifies Ingress Postupgrade", func() { - namespace := "auto-ingress" - defer terraform.RemoveWorkload("ingress.yaml", createcluster.KubeConfigFile) - - res, err := terraform.IsAppRunning(namespace, "nginx-app-ingress", createcluster.KubeConfigFile) - Expect(err).NotTo(HaveOccurred()) - Expect(res).Should(ContainSubstring("test-ingress")) 
- - var ingressIps []string - nodes, err := terraform.WorkerNodes(createcluster.KubeConfigFile, false) - if err != nil { - fmt.Println("Error retrieving nodes: ", err) - } - Eventually(func(g Gomega) { - ingressIps, err = terraform.FetchIngressIP(namespace, createcluster.KubeConfigFile) - g.Expect(err).NotTo(HaveOccurred(), "Ingress ip is not returned") - g.Expect(len(ingressIps)).To(Equal(len(nodes)), "Number of ingress IPs should match the number of nodes") - }, "120s", "5s").Should(Succeed()) - - for _, ip := range ingressIps { - cmd := "curl -s --header host:foo1.bar.com" + " http://" + ip + "/name.html" - Eventually(func(g Gomega) { - res, err := terraform.RunCommand(cmd) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-ingress")) - }, "120s", "5s").Should(Succeed()) - } - }) - - It("Verifies Daemonset Postupgrade", func() { - defer terraform.RemoveWorkload("daemonset.yaml", createcluster.KubeConfigFile) - nodes, _ := terraform.WorkerNodes(createcluster.KubeConfigFile, false) - pods, _ := terraform.Pods(createcluster.KubeConfigFile, false) - count := terraform.CountOfStringInSlice("test-daemonset", pods) - Expect(count).Should((Equal(len(nodes))), "Daemonset pod count does not match node count") - }) - - It("Verifies DNS Access Postupgrade", func() { - namespace := "auto-dns" - _, err := terraform.DeployWorkload("dnsutils.yaml", createcluster.KubeConfigFile) - Expect(err).NotTo(HaveOccurred(), "dnsutils manifest not deployed") - defer terraform.RemoveWorkload("dnsutils.yaml", createcluster.KubeConfigFile) - - Eventually(func(g Gomega) { - cmd := "kubectl get pods dnsutils " + "-n " + namespace + " --kubeconfig=" + createcluster.KubeConfigFile - res, _ := terraform.RunCommand(cmd) - g.Expect(res).Should(ContainSubstring("dnsutils")) - g.Expect(res).Should(ContainSubstring("Running")) - }, "120s", "2s").Should(Succeed()) - - Eventually(func(g Gomega) { - cmd := "kubectl -n " + namespace + " --kubeconfig=" + 
createcluster.KubeConfigFile + " exec -t dnsutils -- nslookup kubernetes.default" - res, _ := terraform.RunCommand(cmd) - g.Expect(res).Should(ContainSubstring("kubernetes.default.svc.cluster.local")) - }, "120s", "2s").Should(Succeed()) - }) - }) - -}) - -var _ = BeforeEach(func() { - if *destroy { - Skip("Cluster is being Deleted") - } -}) - -var _ = AfterEach(func() { - if CurrentSpecReport().Failed() { - fmt.Printf("\nFAILED! %s\n", CurrentSpecReport().FullText()) - } else { - fmt.Printf("\nPASSED! %s\n", CurrentSpecReport().FullText()) - } -}) diff --git a/tests/terraform/modules/providers.tf b/tests/terraform/modules/providers.tf deleted file mode 100644 index 0ced9d75210..00000000000 --- a/tests/terraform/modules/providers.tf +++ /dev/null @@ -1,3 +0,0 @@ -provider "aws" { - region = "${var.region}" -} diff --git a/tests/terraform/scripts/delete_resources.sh b/tests/terraform/scripts/delete_resources.sh deleted file mode 100755 index af24ea329ab..00000000000 --- a/tests/terraform/scripts/delete_resources.sh +++ /dev/null @@ -1,86 +0,0 @@ -#!/bin/bash - -#Get resource name from tfvarslocal && change name to make more sense in this context -RESOURCE_NAME=$(grep resource_name /dev/null 2>&1 - - -#Get the list of load balancer ARNs -LB_ARN_LIST=$(aws elbv2 describe-load-balancers \ - --query "LoadBalancers[?starts_with(LoadBalancerName, '${NAME_PREFIX}') && Type=='network'].LoadBalancerArn" \ - --output text) - - -#Loop through the load balancer ARNs and delete the load balancers -for LB_ARN in $LB_ARN_LIST; do - echo "Deleting load balancer $LB_ARN" - aws elbv2 delete-load-balancer --load-balancer-arn "$LB_ARN" -done - - -#Get the list of target group ARNs -TG_ARN_LIST=$(aws elbv2 describe-target-groups \ - --query "TargetGroups[?starts_with(TargetGroupName, '${NAME_PREFIX}') && Protocol=='TCP'].TargetGroupArn" \ - --output text) - - -#Loop through the target group ARNs and delete the target groups -for TG_ARN in $TG_ARN_LIST; do - echo "Deleting target group 
$TG_ARN" - aws elbv2 delete-target-group --target-group-arn "$TG_ARN" -done - - -#Get the ID and recordName with lower case of the hosted zone that contains the Route 53 record sets -NAME_PREFIX_LOWER=$(echo "$NAME_PREFIX" | tr '[:upper:]' '[:lower:]') -R53_ZONE_ID=$(aws route53 list-hosted-zones-by-name --dns-name "${NAME_PREFIX}." \ - --query "HostedZones[0].Id" --output text) -R53_RECORD=$(aws route53 list-resource-record-sets \ - --hosted-zone-id "${R53_ZONE_ID}" \ - --query "ResourceRecordSets[?starts_with(Name, '${NAME_PREFIX_LOWER}.') && Type == 'CNAME'].Name" \ - --output text) - - -#Get ResourceRecord Value -RECORD_VALUE=$(aws route53 list-resource-record-sets \ - --hosted-zone-id "${R53_ZONE_ID}" \ - --query "ResourceRecordSets[?starts_with(Name, '${NAME_PREFIX_LOWER}.') \ - && Type == 'CNAME'].ResourceRecords[0].Value" --output text) - - -#Delete Route53 record -if [[ "$R53_RECORD" == "${NAME_PREFIX_LOWER}."* ]]; then - echo "Deleting Route53 record ${R53_RECORD}" - CHANGE_STATUS=$(aws route53 change-resource-record-sets --hosted-zone-id "${R53_ZONE_ID}" \ - --change-batch '{"Changes": [ - { - "Action": "DELETE", - "ResourceRecordSet": { - "Name": "'"${R53_RECORD}"'", - "Type": "CNAME", - "TTL": 300, - "ResourceRecords": [ - { - "Value": "'"${RECORD_VALUE}"'" - } - ] - } - } - ] - }') - STATUS_ID=$(echo "$CHANGE_STATUS" | jq -r '.ChangeInfo.Id') - #Get status from the change - aws route53 wait resource-record-sets-changed --id "$STATUS_ID" - echo "Successfully deleted Route53 record ${R53_RECORD}: status: ${STATUS_ID}" -else - echo "No Route53 record found" -fi \ No newline at end of file diff --git a/tests/terraform/suc_upgrade/upgradecluster.go b/tests/terraform/suc_upgrade/upgradecluster.go deleted file mode 100644 index fee95468b2a..00000000000 --- a/tests/terraform/suc_upgrade/upgradecluster.go +++ /dev/null @@ -1,28 +0,0 @@ -package suc_upgrade - -import ( - "fmt" - "os" - "regexp" - "strings" - - "github.com/rancher/rke2/tests/terraform" -) - 
-func upgradeCluster(version string, kubeconfig string) error { - if strings.TrimSpace(version) == "" { - return fmt.Errorf("please provide a non-empty rke2 version to upgrade to") - } - regex := regexp.MustCompile(`\+`) - sucVersion := regex.ReplaceAllString(version, "-") - originalFilePath := terraform.Basepath() + "/tests/terraform/resource_files" + "/upgrade-plan.yaml" - newFilePath := terraform.Basepath() + "/tests/terraform/resource_files" + "/plan.yaml" - content, err := os.ReadFile(originalFilePath) - if err != nil { - return err - } - newContent := strings.ReplaceAll(string(content), "$UPGRADEVERSION", sucVersion) - os.WriteFile(newFilePath, []byte(newContent), 0777) - _, err = terraform.DeployWorkload("plan.yaml", kubeconfig) - return err -} diff --git a/tests/terraform/suc_upgrade/upgradecluster_test.go b/tests/terraform/suc_upgrade/upgradecluster_test.go deleted file mode 100644 index 80dd8da3ccb..00000000000 --- a/tests/terraform/suc_upgrade/upgradecluster_test.go +++ /dev/null @@ -1,453 +0,0 @@ -package suc_upgrade - -import ( - "flag" - "fmt" - "regexp" - "strings" - "testing" - - "github.com/rancher/rke2/tests/terraform" - "github.com/rancher/rke2/tests/terraform/createcluster" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" -) - -var ( - destroy = flag.Bool("destroy", false, "a bool") - upgradeVersion = flag.String("upgradeVersion", "", "Version to upgrade the cluster to") -) - -func Test_TFUpgradeClusterValidation(t *testing.T) { - RegisterFailHandler(Fail) - flag.Parse() - - RunSpecs(t, "Upgrade Cluster Test Suite") -} - -var _ = Describe("Upgrade Tests:", func() { - Context("Build Cluster:", func() { - It("Starts up with no issues", func() { - status, err := createcluster.BuildCluster(&testing.T{}, false) - Expect(err).NotTo(HaveOccurred()) - Expect(status).To(Equal("cluster created")) - - defer GinkgoRecover() - - fmt.Println("Server Node IPS:", createcluster.MasterIPs) - fmt.Println("Agent Node IPS:", createcluster.WorkerIPs) - terraform.PrintFileContents(createcluster.KubeConfigFile) - - Expect(createcluster.KubeConfigFile).ShouldNot(BeEmpty()) - Expect(createcluster.MasterIPs).ShouldNot(BeEmpty()) - - if createcluster.NumWorkers > 0 { - Expect(createcluster.WorkerIPs).ShouldNot(BeEmpty()) - } else { - Expect(createcluster.WorkerIPs).Should(BeEmpty()) - } - Expect(createcluster.KubeConfigFile).ShouldNot(BeEmpty()) - }) - - It("Checks Node and Pod Status", func() { - defer func() { - fmt.Printf("\nFetching node status preupgrade\n") - - _, err := terraform.Nodes(createcluster.KubeConfigFile, true) - if err != nil { - fmt.Println("Error retrieving nodes preupgrade: ", err) - } - _, err = terraform.Pods(createcluster.KubeConfigFile, true) - if err != nil { - fmt.Println("Error retrieving pods preupgrade: ", err) - } - }() - - expectedNodeCount := createcluster.NumServers + createcluster.NumWorkers - Eventually(func(g Gomega) { - nodes, err := terraform.Nodes(createcluster.KubeConfigFile, false) - - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(len(nodes)).To(Equal(expectedNodeCount), - "Number of nodes should match the spec") - - for _, node := range nodes { - g.Expect(node.Status).Should(Equal("Ready"), - "Nodes should all be in Ready state", node.Name) - 
} - }, "420s", "5s").Should(Succeed()) - - fmt.Printf("\nFetching pod status preupgrade\n") - Eventually(func(g Gomega) { - pods, err := terraform.Pods(createcluster.KubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - - re := regexp.MustCompile("[0-9]+") - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - g.Expect(pod.Restarts).Should(Equal("0"), pod.Name) - numRunning := re.FindAllString(pod.Ready, 2) - g.Expect(numRunning[0]).Should(Equal(numRunning[1]), pod.Name, - "should have all containers running") - } - } - }, "600s", "5s").Should(Succeed()) - }) - }) - - Context("Preupgrade Validations:", func() { - It("Verifies ClusterIP Service Preupgrade", func() { - namespace := "auto-clusterip" - _, err := terraform.DeployWorkload("clusterip.yaml", createcluster.KubeConfigFile) - - Expect(err).NotTo(HaveOccurred(), "Cluster IP manifest not deployed") - - Eventually(func(g Gomega) { - cmd := "kubectl get pods -n " + namespace + " -o=name " + - "-l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running " + - "--kubeconfig=" + createcluster.KubeConfigFile - res, err := terraform.RunCommand(cmd) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-clusterip")) - }, "240s", "5s").Should(Succeed()) - - clusterip, port, _ := terraform.FetchClusterIP(createcluster.KubeConfigFile, - namespace, "nginx-clusterip-svc") - - cmd := "curl -sL --insecure http://" + clusterip + ":" + port + "/name.html" - nodeExternalIP := terraform.FetchNodeExternalIP(createcluster.KubeConfigFile) - - for _, ip := range nodeExternalIP { - Eventually(func(g Gomega) { - res, err := terraform.RunCommandOnNode(cmd, ip, createcluster.AwsUser, createcluster.AccessKey) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-clusterip")) - }, "240s", 
"10s").Should(Succeed()) - } - }) - - It("Verifies NodePort Service Preupgrade", func() { - namespace := "auto-nodeport" - _, err := terraform.DeployWorkload("nodeport.yaml", createcluster.KubeConfigFile) - - Expect(err).NotTo(HaveOccurred(), "NodePort manifest not deployed") - - nodeExternalIP := terraform.FetchNodeExternalIP(createcluster.KubeConfigFile) - cmd := "kubectl get service -n " + namespace + " nginx-nodeport-svc --kubeconfig=" + - createcluster.KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\"" - - nodeport, err := terraform.RunCommand(cmd) - Expect(err).NotTo(HaveOccurred()) - - for _, ip := range nodeExternalIP { - Eventually(func(g Gomega) { - cmd := "kubectl get pods -n " + namespace + " -o=name -l k8s-app=nginx-app-nodeport " + - "--field-selector=status.phase=Running --kubeconfig=" + createcluster.KubeConfigFile - res, err := terraform.RunCommand(cmd) - - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-nodeport")) - }, "240s", "5s").Should(Succeed()) - - cmd = "curl -sL --insecure http://" + ip + ":" + nodeport + "/name.html" - Eventually(func(g Gomega) { - res, err := terraform.RunCommand(cmd) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-nodeport")) - }, "240s", "5s").Should(Succeed()) - } - }) - - It("Verifies Ingress Preupgrade", func() { - namespace := "auto-ingress" - _, err := terraform.DeployWorkload("ingress.yaml", createcluster.KubeConfigFile) - - Expect(err).NotTo(HaveOccurred(), "Ingress manifest not deployed") - - Eventually(func(g Gomega) { - cmd := "kubectl get pods -n " + namespace + " -o=name -l k8s-app=nginx-app-ingress " + - "--field-selector=status.phase=Running --kubeconfig=" + createcluster.KubeConfigFile - res, err := terraform.RunCommand(cmd) - - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-ingress")) - }, "240s", "5s").Should(Succeed()) - - var ingressIps []string - nodes, err := 
terraform.WorkerNodes(createcluster.KubeConfigFile, false) - if err != nil { - fmt.Println("Error retrieving nodes: ", err) - } - - Eventually(func(g Gomega) { - ingressIps, err = terraform.FetchIngressIP(namespace, createcluster.KubeConfigFile) - - g.Expect(err).NotTo(HaveOccurred(), "Ingress ip is not returned") - g.Expect(len(ingressIps)).To(Equal(len(nodes)), - "Number of ingress IPs should match the number of nodes") - }, "240s", "5s").Should(Succeed()) - - for _, ip := range ingressIps { - cmd := "curl -s --header host:foo1.bar.com" + " http://" + ip + "/name.html" - Eventually(func(g Gomega) { - res, err := terraform.RunCommand(cmd) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-ingress")) - }, "240s", "5s").Should(Succeed()) - } - }) - - It("Verifies Daemonset Preupgrade", func() { - _, err := terraform.DeployWorkload("daemonset.yaml", createcluster.KubeConfigFile) - - Expect(err).NotTo(HaveOccurred(), "Daemonset manifest not deployed") - - nodes, _ := terraform.WorkerNodes(createcluster.KubeConfigFile, false) - pods, _ := terraform.Pods(createcluster.KubeConfigFile, false) - Eventually(func(g Gomega) { - count := terraform.CountOfStringInSlice("test-daemonset", pods) - g.Expect(count).Should(Equal(len(nodes)), - "Daemonset pod count does not match node count") - }, "240s", "10s").Should(Succeed()) - }) - - It("Verifies DNS Access Preupgrade", func() { - namespace := "auto-dns" - _, err := terraform.DeployWorkload("dnsutils.yaml", createcluster.KubeConfigFile) - - Expect(err).NotTo(HaveOccurred(), "dnsutils manifest not deployed") - - Eventually(func(g Gomega) { - cmd := "kubectl get pods dnsutils " + "-n " + namespace + - " --kubeconfig=" + createcluster.KubeConfigFile - res, _ := terraform.RunCommand(cmd) - - g.Expect(res).Should(ContainSubstring("dnsutils")) - g.Expect(res).Should(ContainSubstring("Running")) - }, "240s", "2s").Should(Succeed()) - - Eventually(func(g Gomega) { - cmd := "kubectl -n " + namespace + " 
--kubeconfig=" + - createcluster.KubeConfigFile + " exec -t dnsutils -- nslookup kubernetes.default" - res, _ := terraform.RunCommand(cmd) - - g.Expect(res).Should(ContainSubstring("kubernetes.default.svc.cluster.local")) - }, "240s", "2s").Should(Succeed()) - }) - }) - - Context("Upgrade via SUC:", func() { - It("Verifies Upgrade", func() { - namespace := "system-upgrade" - _, err := terraform.DeployWorkload("suc.yaml", createcluster.KubeConfigFile) - Expect(err).NotTo(HaveOccurred(), - "system-upgrade-controller manifest did not deploy successfully") - - Eventually(func(g Gomega) { - cmd := "kubectl get pods " + "-n " + namespace + " --kubeconfig=" + createcluster.KubeConfigFile - res, _ := terraform.RunCommand(cmd) - - g.Expect(res).Should(ContainSubstring("system-upgrade-controller")) - g.Expect(res).Should(ContainSubstring("Running")) - }, "120s", "2s").Should(Succeed()) - - err = upgradeCluster(*upgradeVersion, createcluster.KubeConfigFile) - Expect(err).NotTo(HaveOccurred(), "failed to upgrade cluster.") - - defer func() { - _, err := terraform.Nodes(createcluster.KubeConfigFile, true) - if err != nil { - fmt.Println("Error retrieving nodes postupgrade: ", err) - } - _, err = terraform.Pods(createcluster.KubeConfigFile, true) - if err != nil { - fmt.Println("Error retrieving pods postupgrade: ", err) - } - }() - - versionRegex := regexp.MustCompile("-rc[0-9]+") - k8sVersion := versionRegex.ReplaceAllString(*upgradeVersion, "") - - fmt.Printf("\nFetching node status postupgrade\n") - - expectedNodeCount := createcluster.NumServers + createcluster.NumWorkers - Eventually(func(g Gomega) { - nodes, err := terraform.Nodes(createcluster.KubeConfigFile, false) - - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(len(nodes)).To(Equal(expectedNodeCount), - "Number of nodes should match the spec") - - for _, node := range nodes { - g.Expect(node.Status).Should(Equal("Ready"), - "Nodes should all be in Ready state after upgrading", node.Name) - 
g.Expect(node.Version).Should(Equal(k8sVersion), - "Nodes should all be upgraded to the specified version", node.Name) - } - }, "900s", "30s").Should(Succeed()) - - fmt.Printf("\n Fetching pod status postupgrade\n") - Eventually(func(g Gomega) { - pods, err := terraform.Pods(createcluster.KubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - numRunning := versionRegex.FindAllString(pod.Ready, 2) - g.Expect(numRunning[0]).Should(Equal(numRunning[1]), pod.Name, "should have all containers running") - } - } - }, "600s", "5s").Should(Succeed()) - }) - }) - - Context("Postupgrade Validations:", func() { - It("Verifies ClusterIP Service Postupgrade", func() { - namespace := "auto-clusterip" - defer terraform.RemoveWorkload("clusterip.yaml", createcluster.KubeConfigFile) - - cmd := "kubectl get pods -n " + namespace + " -o=name -l " + - "k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + - createcluster.KubeConfigFile - res, err := terraform.RunCommand(cmd) - - Expect(err).NotTo(HaveOccurred()) - Expect(res).Should(ContainSubstring("test-clusterip")) - - clusterip, port, _ := terraform.FetchClusterIP(createcluster.KubeConfigFile, - namespace, "nginx-clusterip-svc") - cmd = "curl -sL --insecure http://" + clusterip + ":" + port + "/name.html" - nodeExternalIP := terraform.FetchNodeExternalIP(createcluster.KubeConfigFile) - - for _, ip := range nodeExternalIP { - Eventually(func(g Gomega) { - res, err := terraform.RunCommandOnNode(cmd, ip, createcluster.AwsUser, createcluster.AccessKey) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-clusterip")) - }, "120s", "10s").Should(Succeed()) - } - }) - - It("Verifies NodePort Service Postupgrade", func() { - namespace := "auto-nodeport" - defer 
terraform.RemoveWorkload("nodeport.yaml", createcluster.KubeConfigFile) - - nodeExternalIP := terraform.FetchNodeExternalIP(createcluster.KubeConfigFile) - cmd := "kubectl get service -n " + namespace + " nginx-nodeport-svc --kubeconfig=" + - createcluster.KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\"" - nodeport, err := terraform.RunCommand(cmd) - - Expect(err).NotTo(HaveOccurred()) - - for _, ip := range nodeExternalIP { - Eventually(func(g Gomega) { - cmd := "kubectl get pods -n " + namespace + " -o=name -l " + - "k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + - createcluster.KubeConfigFile - res, err := terraform.RunCommand(cmd) - - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-nodeport")) - }, "120s", "5s").Should(Succeed()) - - cmd = "curl -sL --insecure http://" + ip + ":" + nodeport + "/name.html" - Eventually(func(g Gomega) { - res, err := terraform.RunCommand(cmd) - - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-nodeport")) - }, "120s", "5s").Should(Succeed()) - } - }) - - It("Verifies Ingress Postupgrade", func() { - namespace := "auto-ingress" - - defer terraform.RemoveWorkload("ingress.yaml", createcluster.KubeConfigFile) - - cmd := "kubectl get pods -n " + namespace + " -o=name -l k8s-app=nginx-app-ingress " + - "--field-selector=status.phase=Running --kubeconfig=" + createcluster.KubeConfigFile - res, err := terraform.RunCommand(cmd) - - Expect(err).NotTo(HaveOccurred()) - Expect(res).Should(ContainSubstring("test-ingress")) - - var ingressIps []string - nodes, err := terraform.WorkerNodes(createcluster.KubeConfigFile, false) - if err != nil { - fmt.Println("Error retrieving nodes: ", err) - } - - Eventually(func(g Gomega) { - ingressIps, err = terraform.FetchIngressIP(namespace, createcluster.KubeConfigFile) - g.Expect(err).NotTo(HaveOccurred(), "Ingress ip is not returned") - 
g.Expect(len(ingressIps)).To(Equal(len(nodes)), - "Number of ingress IPs should match the number of nodes") - }, "120s", "5s").Should(Succeed()) - - for _, ip := range ingressIps { - cmd := "curl -s --header host:foo1.bar.com" + " http://" + ip + "/name.html" - Eventually(func(g Gomega) { - res, err := terraform.RunCommand(cmd) - - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-ingress")) - }, "120s", "5s").Should(Succeed()) - } - }) - - It("Verifies Daemonset Postupgrade", func() { - nodes, _ := terraform.WorkerNodes(createcluster.KubeConfigFile, false) - pods, _ := terraform.Pods(createcluster.KubeConfigFile, false) - count := terraform.CountOfStringInSlice("test-daemonset", pods) - - defer terraform.RemoveWorkload("daemonset.yaml", createcluster.KubeConfigFile) - Expect(count).Should((Equal(len(nodes))), - "Daemonset pod count does not match node count") - }) - - It("Verifies DNS Access Postupgrade", func() { - namespace := "auto-dns" - _, err := terraform.DeployWorkload("dnsutils.yaml", createcluster.KubeConfigFile) - - Expect(err).NotTo(HaveOccurred(), "dnsutils manifest not deployed") - - defer terraform.RemoveWorkload("dnsutils.yaml", createcluster.KubeConfigFile) - Eventually(func(g Gomega) { - cmd := "kubectl get pods dnsutils " + "-n " + namespace + - " --kubeconfig=" + createcluster.KubeConfigFile - res, _ := terraform.RunCommand(cmd) - - g.Expect(res).Should(ContainSubstring("dnsutils")) - g.Expect(res).Should(ContainSubstring("Running")) - }, "120s", "2s").Should(Succeed()) - - Eventually(func(g Gomega) { - cmd := "kubectl -n " + namespace + " --kubeconfig=" + createcluster.KubeConfigFile + " exec -t dnsutils -- nslookup kubernetes.default" - res, _ := terraform.RunCommand(cmd) - - g.Expect(res).Should(ContainSubstring("kubernetes.default.svc.cluster.local")) - }, "120s", "2s").Should(Succeed()) - }) - }) -}) - -var _ = BeforeEach(func() { - if *destroy { - Skip("Cluster is being Deleted") - } -}) - -var _ = 
AfterEach(func() { - if CurrentSpecReport().Failed() { - fmt.Printf("\nFAILED! %s\n", CurrentSpecReport().FullText()) - } else { - fmt.Printf("\nPASSED! %s\n", CurrentSpecReport().FullText()) - } -}) diff --git a/tests/terraform/testutils.go b/tests/terraform/testutils.go deleted file mode 100644 index f4aa5c514d1..00000000000 --- a/tests/terraform/testutils.go +++ /dev/null @@ -1,292 +0,0 @@ -package terraform - -import ( - "bytes" - "fmt" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - "time" - - "golang.org/x/crypto/ssh" -) - -type KubectlCommand string - -var config *ssh.ClientConfig - -type Node struct { - Name string - Status string - Roles string - Version string - InternalIP string - ExternalIP string -} - -type Pod struct { - NameSpace string - Name string - Ready string - Status string - Restarts string - NodeIP string - Node string -} - -func publicKey(path string) (ssh.AuthMethod, error) { - key, err := os.ReadFile(path) - if err != nil { - return nil, err - } - signer, err := ssh.ParsePrivateKey(key) - if err != nil { - return nil, err - } - return ssh.PublicKeys(signer), nil -} - -func configureSSH(host string, sshUser string, sshKey string) (*ssh.Client, error) { - authMethod, err := publicKey(sshKey) - if err != nil { - return nil, err - } - config = &ssh.ClientConfig{ - User: sshUser, - Auth: []ssh.AuthMethod{ - authMethod, - }, - HostKeyCallback: ssh.InsecureIgnoreHostKey(), - } - conn, err := ssh.Dial("tcp", host, config) - if err != nil { - return nil, err - } - return conn, nil -} - -func runsshCommand(cmd string, conn *ssh.Client) (string, error) { - session, err := conn.NewSession() - if err != nil { - return "", err - } - defer session.Close() - var stdoutBuf bytes.Buffer - var stderrBuf bytes.Buffer - session.Stdout = &stdoutBuf - session.Stderr = &stderrBuf - if err := session.Run(cmd); err != nil { - return "", err - } - return fmt.Sprintf("%s", stdoutBuf.String()), err -} - -func parseNodes(kubeConfig string, print bool, 
cmd string) ([]Node, error) { - nodes := make([]Node, 0, 10) - res, err := RunCommand(cmd) - if err != nil { - return nil, err - } - rawNodes := strings.TrimSpace(res) - split := strings.Split(rawNodes, "\n") - for _, rec := range split { - if strings.TrimSpace(rec) != "" { - fields := strings.Fields(rec) - n := Node{ - Name: fields[0], - Status: fields[1], - Roles: fields[2], - Version: fields[4], - InternalIP: fields[5], - ExternalIP: fields[6], - } - nodes = append(nodes, n) - } - } - if print { - fmt.Println(rawNodes) - } - - return nodes, nil -} - -func parsePods(kubeconfig string, print bool, cmd string) ([]Pod, error) { - pods := make([]Pod, 0, 10) - res, _ := RunCommand(cmd) - rawPods := strings.TrimSpace(res) - - split := strings.Split(rawPods, "\n") - for _, rec := range split { - fields := strings.Fields(string(rec)) - p := Pod{ - NameSpace: fields[0], - Name: fields[1], - Ready: fields[2], - Status: fields[3], - Restarts: fields[4], - NodeIP: fields[6], - Node: fields[7], - } - pods = append(pods, p) - } - if print { - fmt.Println(rawPods) - } - - return pods, nil -} - -func Basepath() string { - _, b, _, _ := runtime.Caller(0) - return filepath.Join(filepath.Dir(b), "../..") -} - -func PrintFileContents(f ...string) error { - for _, file := range f { - content, err := os.ReadFile(file) - if err != nil { - return err - } - fmt.Println(string(content) + "\n") - } - - return nil -} - -// RunCommandOnNode executes a command from within the given node -func RunCommandOnNode(cmd string, ServerIP string, sshUser string, sshKey string) (string, error) { - Server := ServerIP + ":22" - conn, err := configureSSH(Server, sshUser, sshKey) - if err != nil { - return "", err - } - res, err := runsshCommand(cmd, conn) - res = strings.TrimSpace(res) - - return res, err -} - -// RunCommand executes a command on the host -func RunCommand(cmd string) (string, error) { - c := exec.Command("bash", "-c", cmd) - out, err := c.CombinedOutput() - - return string(out), err -} - 
-// CountOfStringInSlice Used to count the pods using prefix passed in the list of pods -func CountOfStringInSlice(str string, pods []Pod) int { - var count int - for _, p := range pods { - if strings.Contains(p.Name, str) { - count++ - } - } - - return count -} - -func DeployWorkload(workload, kubeconfig string) (string, error) { - resourceDir := Basepath() + "/tests/terraform/resource_files" - files, err := os.ReadDir(resourceDir) - if err != nil { - return "", fmt.Errorf("%s : Unable to read resource manifest file for %s", err, workload) - } - for _, f := range files { - filename := filepath.Join(resourceDir, f.Name()) - if strings.TrimSpace(f.Name()) == workload { - cmd := "kubectl apply -f " + filename + " --kubeconfig=" + kubeconfig - return RunCommand(cmd) - } - } - - return "", nil -} - -func RemoveWorkload(workload, kubeconfig string) (string, error) { - resourceDir := Basepath() + "/tests/terraform/resource_files" - files, err := os.ReadDir(resourceDir) - if err != nil { - return "", fmt.Errorf("%s : Unable to read resource manifest file for %s", err, workload) - } - for _, f := range files { - filename := filepath.Join(resourceDir, f.Name()) - if strings.TrimSpace(f.Name()) == workload { - cmd := "kubectl delete -f " + filename + " --kubeconfig=" + kubeconfig - return RunCommand(cmd) - } - } - - return "", nil -} - -func FetchClusterIP(kubeconfig string, namespace string, servicename string) (string, string, error) { - ipCmd := "kubectl get svc " + servicename + " -n " + namespace + - " -o jsonpath='{.spec.clusterIP}' --kubeconfig=" + kubeconfig - ip, err := RunCommand(ipCmd) - if err != nil { - return "", "", err - } - portCmd := "kubectl get svc " + servicename + " -n " + namespace + - " -o jsonpath='{.spec.ports[0].port}' --kubeconfig=" + kubeconfig - port, err := RunCommand(portCmd) - if err != nil { - return "", "", err - } - - return ip, port, err -} - -func FetchNodeExternalIP(kubeconfig string) []string { - cmd := "kubectl get node 
--output=jsonpath='{range .items[*]} " + - "{ .status.addresses[?(@.type==\"ExternalIP\")].address}' --kubeconfig=" + kubeconfig - time.Sleep(10 * time.Second) - res, _ := RunCommand(cmd) - nodeExternalIP := strings.Trim(res, " ") - nodeExternalIPs := strings.Split(nodeExternalIP, " ") - - return nodeExternalIPs -} - -func FetchIngressIP(namespace string, kubeconfig string) ([]string, error) { - cmd := "kubectl get ingress -n " + namespace + - " -o jsonpath='{.items[0].status.loadBalancer.ingress[*].ip}' --kubeconfig=" + kubeconfig - res, err := RunCommand(cmd) - if err != nil { - return nil, err - } - ingressIP := strings.Trim(res, " ") - if ingressIP != "" { - ingressIPs := strings.Split(ingressIP, " ") - return ingressIPs, nil - } - - return nil, nil -} - -func Nodes(kubeConfig string, print bool) ([]Node, error) { - cmd := "kubectl get nodes --no-headers -o wide --kubeconfig=" + kubeConfig - return parseNodes(kubeConfig, print, cmd) -} - -func WorkerNodes(kubeConfig string, print bool) ([]Node, error) { - cmd := "kubectl get node -o jsonpath='{range .items[*]}{@.metadata.name} " + - "{@.status.conditions[-1].type} {@.status.nodeInfo.kubeletVersion} " + - "{@.status.addresses[?(@.type==\"InternalIP\")].address} " + - "{@.status.addresses[?(@.type==\"ExternalIP\")].address} {@.spec.taints[*].effect}{\"\\n\"}{end}' " + - "--kubeconfig=" + kubeConfig + " | grep -v NoSchedule | grep -v NoExecute" - - return parseNodes(kubeConfig, print, cmd) -} - -func Pods(kubeconfig string, print bool) ([]Pod, error) { - cmd := "kubectl get pods -o wide --no-headers -A --kubeconfig=" + kubeconfig - return parsePods(kubeconfig, print, cmd) -} - -func IsAppRunning(namespace, appName string, kubeconfig string) (string, error) { - cmd := "kubectl get pods -n" + namespace + " -o=name -l k8s-app=" + appName + " --field-selector=status.phase=Running --kubeconfig=" + kubeconfig - return RunCommand(cmd) -} diff --git a/tests/terraform/upgradecluster/upgradecluster_test.go 
b/tests/terraform/upgradecluster/upgradecluster_test.go deleted file mode 100644 index ec282def38c..00000000000 --- a/tests/terraform/upgradecluster/upgradecluster_test.go +++ /dev/null @@ -1,453 +0,0 @@ -package upgradecluster - -import ( - "flag" - "fmt" - "regexp" - "strings" - "testing" - - "github.com/rancher/rke2/tests/terraform" - "github.com/rancher/rke2/tests/terraform/createcluster" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -var ( - destroy = flag.Bool("destroy", false, "a bool") - upgradeVersion = flag.String("upgradeVersion", "", "Version to upgrade the cluster to") -) - -func Test_TFUpgradeClusterValidation(t *testing.T) { - RegisterFailHandler(Fail) - flag.Parse() - - RunSpecs(t, "Upgrade Cluster Test Suite") -} - -var _ = Describe("Upgrade Tests:", func() { - Context("Build Cluster:", func() { - It("Starts up with no issues", func() { - status, err := createcluster.BuildCluster(&testing.T{}, false) - Expect(err).NotTo(HaveOccurred()) - Expect(status).To(Equal("cluster created")) - - defer GinkgoRecover() - - fmt.Println("Server Node IPS:", createcluster.MasterIPs) - fmt.Println("Agent Node IPS:", createcluster.WorkerIPs) - terraform.PrintFileContents(createcluster.KubeConfigFile) - - Expect(createcluster.KubeConfigFile).ShouldNot(BeEmpty()) - Expect(createcluster.MasterIPs).ShouldNot(BeEmpty()) - - if createcluster.NumWorkers > 0 { - Expect(createcluster.WorkerIPs).ShouldNot(BeEmpty()) - } else { - Expect(createcluster.WorkerIPs).Should(BeEmpty()) - } - Expect(createcluster.KubeConfigFile).ShouldNot(BeEmpty()) - }) - - It("Checks Node and Pod Status", func() { - defer func() { - fmt.Printf("\nFetching node status preupgrade\n") - - _, err := terraform.Nodes(createcluster.KubeConfigFile, true) - if err != nil { - fmt.Println("Error retrieving nodes preupgrade: ", err) - } - _, err = terraform.Pods(createcluster.KubeConfigFile, true) - if err != nil { - fmt.Println("Error retrieving pods preupgrade: ", err) - } - }() - - 
expectedNodeCount := createcluster.NumServers + createcluster.NumWorkers - Eventually(func(g Gomega) { - nodes, err := terraform.Nodes(createcluster.KubeConfigFile, false) - - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(len(nodes)).To(Equal(expectedNodeCount), - "Number of nodes should match the spec") - - for _, node := range nodes { - g.Expect(node.Status).Should(Equal("Ready"), - "Nodes should all be in Ready state", node.Name) - } - }, "420s", "5s").Should(Succeed()) - - fmt.Printf("\nFetching pod status preupgrade\n") - Eventually(func(g Gomega) { - pods, err := terraform.Pods(createcluster.KubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - - re := regexp.MustCompile("[0-9]+") - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - g.Expect(pod.Restarts).Should(Equal("0"), pod.Name) - numRunning := re.FindAllString(pod.Ready, 2) - g.Expect(numRunning[0]).Should(Equal(numRunning[1]), pod.Name, - "should have all containers running") - } - } - }, "600s", "5s").Should(Succeed()) - }) - }) - - Context("Preupgrade Validations:", func() { - It("Verifies ClusterIP Service Preupgrade", func() { - namespace := "auto-clusterip" - _, err := terraform.DeployWorkload("clusterip.yaml", createcluster.KubeConfigFile) - - Expect(err).NotTo(HaveOccurred(), "Cluster IP manifest not deployed") - - Eventually(func(g Gomega) { - cmd := "kubectl get pods -n " + namespace + " -o=name " + - "-l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running " + - "--kubeconfig=" + createcluster.KubeConfigFile - res, err := terraform.RunCommand(cmd) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-clusterip")) - }, "240s", "5s").Should(Succeed()) - - clusterip, port, _ := terraform.FetchClusterIP(createcluster.KubeConfigFile, - namespace, "nginx-clusterip-svc") - - cmd := "curl -sL 
--insecure http://" + clusterip + ":" + port + "/name.html" - nodeExternalIP := terraform.FetchNodeExternalIP(createcluster.KubeConfigFile) - - for _, ip := range nodeExternalIP { - Eventually(func(g Gomega) { - res, err := terraform.RunCommandOnNode(cmd, ip, createcluster.AwsUser, createcluster.AccessKey) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-clusterip")) - }, "240s", "10s").Should(Succeed()) - } - }) - - It("Verifies NodePort Service Preupgrade", func() { - namespace := "auto-nodeport" - _, err := terraform.DeployWorkload("nodeport.yaml", createcluster.KubeConfigFile) - - Expect(err).NotTo(HaveOccurred(), "NodePort manifest not deployed") - - nodeExternalIP := terraform.FetchNodeExternalIP(createcluster.KubeConfigFile) - cmd := "kubectl get service -n " + namespace + " nginx-nodeport-svc --kubeconfig=" + - createcluster.KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\"" - - nodeport, err := terraform.RunCommand(cmd) - Expect(err).NotTo(HaveOccurred()) - - for _, ip := range nodeExternalIP { - Eventually(func(g Gomega) { - cmd := "kubectl get pods -n " + namespace + " -o=name -l k8s-app=nginx-app-nodeport " + - "--field-selector=status.phase=Running --kubeconfig=" + createcluster.KubeConfigFile - res, err := terraform.RunCommand(cmd) - - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-nodeport")) - }, "240s", "5s").Should(Succeed()) - - cmd = "curl -sL --insecure http://" + ip + ":" + nodeport + "/name.html" - Eventually(func(g Gomega) { - res, err := terraform.RunCommand(cmd) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-nodeport")) - }, "240s", "5s").Should(Succeed()) - } - }) - - It("Verifies Ingress Preupgrade", func() { - namespace := "auto-ingress" - _, err := terraform.DeployWorkload("ingress.yaml", createcluster.KubeConfigFile) - - Expect(err).NotTo(HaveOccurred(), "Ingress manifest not deployed") - - Eventually(func(g 
Gomega) { - cmd := "kubectl get pods -n " + namespace + " -o=name -l k8s-app=nginx-app-ingress " + - "--field-selector=status.phase=Running --kubeconfig=" + createcluster.KubeConfigFile - res, err := terraform.RunCommand(cmd) - - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-ingress")) - }, "240s", "5s").Should(Succeed()) - - var ingressIps []string - nodes, err := terraform.WorkerNodes(createcluster.KubeConfigFile, false) - if err != nil { - fmt.Println("Error retrieving nodes: ", err) - } - - Eventually(func(g Gomega) { - ingressIps, err = terraform.FetchIngressIP(namespace, createcluster.KubeConfigFile) - - g.Expect(err).NotTo(HaveOccurred(), "Ingress ip is not returned") - g.Expect(len(ingressIps)).To(Equal(len(nodes)), - "Number of ingress IPs should match the number of nodes") - }, "240s", "5s").Should(Succeed()) - - for _, ip := range ingressIps { - cmd := "curl -s --header host:foo1.bar.com" + " http://" + ip + "/name.html" - Eventually(func(g Gomega) { - res, err := terraform.RunCommand(cmd) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-ingress")) - }, "240s", "5s").Should(Succeed()) - } - }) - - It("Verifies Daemonset Preupgrade", func() { - _, err := terraform.DeployWorkload("daemonset.yaml", createcluster.KubeConfigFile) - - Expect(err).NotTo(HaveOccurred(), "Daemonset manifest not deployed") - - nodes, _ := terraform.WorkerNodes(createcluster.KubeConfigFile, false) - pods, _ := terraform.Pods(createcluster.KubeConfigFile, false) - Eventually(func(g Gomega) { - count := terraform.CountOfStringInSlice("test-daemonset", pods) - g.Expect(count).Should(Equal(len(nodes)), - "Daemonset pod count does not match node count") - }, "240s", "10s").Should(Succeed()) - }) - - It("Verifies DNS Access Preupgrade", func() { - namespace := "auto-dns" - _, err := terraform.DeployWorkload("dnsutils.yaml", createcluster.KubeConfigFile) - - Expect(err).NotTo(HaveOccurred(), "dnsutils manifest not 
deployed") - - Eventually(func(g Gomega) { - cmd := "kubectl get pods dnsutils " + "-n " + namespace + - " --kubeconfig=" + createcluster.KubeConfigFile - res, _ := terraform.RunCommand(cmd) - - g.Expect(res).Should(ContainSubstring("dnsutils")) - g.Expect(res).Should(ContainSubstring("Running")) - }, "240s", "2s").Should(Succeed()) - - Eventually(func(g Gomega) { - cmd := "kubectl -n " + namespace + " --kubeconfig=" + - createcluster.KubeConfigFile + " exec -t dnsutils -- nslookup kubernetes.default" - res, _ := terraform.RunCommand(cmd) - - g.Expect(res).Should(ContainSubstring("kubernetes.default.svc.cluster.local")) - }, "240s", "2s").Should(Succeed()) - }) - }) - - Context("Upgrade via SUC:", func() { - It("Verifies Upgrade", func() { - namespace := "system-upgrade" - _, err := terraform.DeployWorkload("suc.yaml", createcluster.KubeConfigFile) - Expect(err).NotTo(HaveOccurred(), - "system-upgrade-controller manifest did not deploy successfully") - - Eventually(func(g Gomega) { - cmd := "kubectl get pods " + "-n " + namespace + " --kubeconfig=" + createcluster.KubeConfigFile - res, _ := terraform.RunCommand(cmd) - - g.Expect(res).Should(ContainSubstring("system-upgrade-controller")) - g.Expect(res).Should(ContainSubstring("Running")) - }, "120s", "2s").Should(Succeed()) - - err = upgradeCluster(*upgradeVersion, createcluster.KubeConfigFile) - Expect(err).NotTo(HaveOccurred(), "failed to upgrade cluster.") - - defer func() { - _, err := terraform.Nodes(createcluster.KubeConfigFile, true) - if err != nil { - fmt.Println("Error retrieving nodes postupgrade: ", err) - } - _, err = terraform.Pods(createcluster.KubeConfigFile, true) - if err != nil { - fmt.Println("Error retrieving pods postupgrade: ", err) - } - }() - - versionRegex := regexp.MustCompile("-rc[0-9]+") - k8sVersion := versionRegex.ReplaceAllString(*upgradeVersion, "") - - fmt.Printf("\nFetching node status postupgrade\n") - - expectedNodeCount := createcluster.NumServers + createcluster.NumWorkers - 
Eventually(func(g Gomega) { - nodes, err := terraform.Nodes(createcluster.KubeConfigFile, false) - - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(len(nodes)).To(Equal(expectedNodeCount), - "Number of nodes should match the spec") - - for _, node := range nodes { - g.Expect(node.Status).Should(Equal("Ready"), - "Nodes should all be in Ready state after upgrading", node.Name) - g.Expect(node.Version).Should(Equal(k8sVersion), - "Nodes should all be upgraded to the specified version", node.Name) - } - }, "900s", "30s").Should(Succeed()) - - fmt.Printf("\n Fetching pod status postupgrade\n") - Eventually(func(g Gomega) { - pods, err := terraform.Pods(createcluster.KubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - numRunning := versionRegex.FindAllString(pod.Ready, 2) - g.Expect(numRunning[0]).Should(Equal(numRunning[1]), pod.Name, "should have all containers running") - } - } - }, "600s", "5s").Should(Succeed()) - }) - }) - - Context("Postupgrade Validations:", func() { - It("Verifies ClusterIP Service Postupgrade", func() { - namespace := "auto-clusterip" - defer terraform.RemoveWorkload("clusterip.yaml", createcluster.KubeConfigFile) - - cmd := "kubectl get pods -n " + namespace + " -o=name -l " + - "k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + - createcluster.KubeConfigFile - res, err := terraform.RunCommand(cmd) - - Expect(err).NotTo(HaveOccurred()) - Expect(res).Should(ContainSubstring("test-clusterip")) - - clusterip, port, _ := terraform.FetchClusterIP(createcluster.KubeConfigFile, - namespace, "nginx-clusterip-svc") - cmd = "curl -sL --insecure http://" + clusterip + ":" + port + "/name.html" - nodeExternalIP := terraform.FetchNodeExternalIP(createcluster.KubeConfigFile) - - for _, ip := range 
nodeExternalIP { - Eventually(func(g Gomega) { - res, err := terraform.RunCommandOnNode(cmd, ip, createcluster.AwsUser, createcluster.AccessKey) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-clusterip")) - }, "120s", "10s").Should(Succeed()) - } - }) - - It("Verifies NodePort Service Postupgrade", func() { - namespace := "auto-nodeport" - defer terraform.RemoveWorkload("nodeport.yaml", createcluster.KubeConfigFile) - - nodeExternalIP := terraform.FetchNodeExternalIP(createcluster.KubeConfigFile) - cmd := "kubectl get service -n " + namespace + " nginx-nodeport-svc --kubeconfig=" + - createcluster.KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\"" - nodeport, err := terraform.RunCommand(cmd) - - Expect(err).NotTo(HaveOccurred()) - - for _, ip := range nodeExternalIP { - Eventually(func(g Gomega) { - cmd := "kubectl get pods -n " + namespace + " -o=name -l " + - "k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + - createcluster.KubeConfigFile - res, err := terraform.RunCommand(cmd) - - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-nodeport")) - }, "120s", "5s").Should(Succeed()) - - cmd = "curl -sL --insecure http://" + ip + ":" + nodeport + "/name.html" - Eventually(func(g Gomega) { - res, err := terraform.RunCommand(cmd) - - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-nodeport")) - }, "120s", "5s").Should(Succeed()) - } - }) - - It("Verifies Ingress Postupgrade", func() { - namespace := "auto-ingress" - - defer terraform.RemoveWorkload("ingress.yaml", createcluster.KubeConfigFile) - - cmd := "kubectl get pods -n " + namespace + " -o=name -l k8s-app=nginx-app-ingress " + - "--field-selector=status.phase=Running --kubeconfig=" + createcluster.KubeConfigFile - res, err := terraform.RunCommand(cmd) - - Expect(err).NotTo(HaveOccurred()) - Expect(res).Should(ContainSubstring("test-ingress")) - - var ingressIps 
[]string - nodes, err := terraform.WorkerNodes(createcluster.KubeConfigFile, false) - if err != nil { - fmt.Println("Error retrieving nodes: ", err) - } - - Eventually(func(g Gomega) { - ingressIps, err = terraform.FetchIngressIP(namespace, createcluster.KubeConfigFile) - g.Expect(err).NotTo(HaveOccurred(), "Ingress ip is not returned") - g.Expect(len(ingressIps)).To(Equal(len(nodes)), - "Number of ingress IPs should match the number of nodes") - }, "120s", "5s").Should(Succeed()) - - for _, ip := range ingressIps { - cmd := "curl -s --header host:foo1.bar.com" + " http://" + ip + "/name.html" - Eventually(func(g Gomega) { - res, err := terraform.RunCommand(cmd) - - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-ingress")) - }, "120s", "5s").Should(Succeed()) - } - }) - - It("Verifies Daemonset Postupgrade", func() { - nodes, _ := terraform.WorkerNodes(createcluster.KubeConfigFile, false) - pods, _ := terraform.Pods(createcluster.KubeConfigFile, false) - count := terraform.CountOfStringInSlice("test-daemonset", pods) - - defer terraform.RemoveWorkload("daemonset.yaml", createcluster.KubeConfigFile) - Expect(count).Should((Equal(len(nodes))), - "Daemonset pod count does not match node count") - }) - - It("Verifies DNS Access Postupgrade", func() { - namespace := "auto-dns" - _, err := terraform.DeployWorkload("dnsutils.yaml", createcluster.KubeConfigFile) - - Expect(err).NotTo(HaveOccurred(), "dnsutils manifest not deployed") - - defer terraform.RemoveWorkload("dnsutils.yaml", createcluster.KubeConfigFile) - Eventually(func(g Gomega) { - cmd := "kubectl get pods dnsutils " + "-n " + namespace + - " --kubeconfig=" + createcluster.KubeConfigFile - res, _ := terraform.RunCommand(cmd) - - g.Expect(res).Should(ContainSubstring("dnsutils")) - g.Expect(res).Should(ContainSubstring("Running")) - }, "120s", "2s").Should(Succeed()) - - Eventually(func(g Gomega) { - cmd := "kubectl -n " + namespace + " --kubeconfig=" + 
createcluster.KubeConfigFile + " exec -t dnsutils -- nslookup kubernetes.default" - res, _ := terraform.RunCommand(cmd) - - g.Expect(res).Should(ContainSubstring("kubernetes.default.svc.cluster.local")) - }, "120s", "2s").Should(Succeed()) - }) - }) -}) - -var _ = BeforeEach(func() { - if *destroy { - Skip("Cluster is being Deleted") - } -}) - -var _ = AfterEach(func() { - if CurrentSpecReport().Failed() { - fmt.Printf("\nFAILED! %s\n", CurrentSpecReport().FullText()) - } else { - fmt.Printf("\nPASSED! %s\n", CurrentSpecReport().FullText()) - } -})