From 6a7ce0cfab385d3372274c7f16e8f3f89f669e1b Mon Sep 17 00:00:00 2001 From: John Pitman Date: Wed, 30 Oct 2024 10:39:36 -0400 Subject: [PATCH 1/4] feat: add e2e test framework and initial tests Signed-off-by: John Pitman --- .github/workflows/ci.yaml | 124 +++- Makefile | 13 + test/e2e2/README.md | 69 +++ test/e2e2/basic_test.go | 140 +++++ test/e2e2/fixture/argosync.go | 53 ++ test/e2e2/fixture/fixture.go | 153 +++++ test/e2e2/fixture/kubeclient.go | 271 +++++++++ test/e2e2/fixture/kubeconfig.go | 58 ++ test/e2e2/fixture_test.go | 543 ++++++++++++++++++ test/e2e2/sync_test.go | 256 +++++++++ test/e2e2/test-env/.gitignore | 1 + test/e2e2/test-env/Procfile | 3 + .../agent-autonomous/argocd-secret.yaml | 4 + .../agent-autonomous/kustomization.yaml | 11 + .../agent-managed/argocd-cmd-params-cm.yaml | 8 + .../test-env/agent-managed/argocd-secret.yaml | 4 + .../test-env/agent-managed/kustomization.yaml | 12 + .../test-env/apps/autonomous-guestbook.yaml | 19 + .../e2e2/test-env/apps/managed-guestbook.yaml | 17 + test/e2e2/test-env/clean-apps.sh | 30 + .../e2e2/test-env/common/default-project.yaml | 15 + test/e2e2/test-env/common/kustomization.yaml | 3 + test/e2e2/test-env/common/redis-secret.yaml | 7 + .../control-plane/appproject-default.yaml | 15 + .../control-plane/argocd-cmd-params-cm.yaml | 6 + .../test-env/control-plane/argocd-secret.yaml | 4 + .../test-env/control-plane/kustomization.yaml | 18 + .../test-env/control-plane/redis-service.yaml | 7 + .../control-plane/repo-server-service.yaml | 7 + .../control-plane/server-service.yaml | 7 + test/e2e2/test-env/gen-creds.sh | 45 ++ .../resources/metallb-ipaddresspool.yaml | 9 + .../resources/scc-anyuid-seccomp-netbind.yaml | 53 ++ test/e2e2/test-env/resources/vcluster.yaml | 15 + test/e2e2/test-env/setup-vcluster-env.sh | 255 ++++++++ test/e2e2/test-env/start-agent-autonomous.sh | 24 + test/e2e2/test-env/start-agent-managed.sh | 24 + test/e2e2/test-env/start-principal.sh | 24 + 38 files changed, 2326 insertions(+), 1 deletion(-) create mode 100644 test/e2e2/README.md create mode 100644 test/e2e2/basic_test.go create mode 100644 test/e2e2/fixture/argosync.go create mode 100644 test/e2e2/fixture/fixture.go create mode 100644 test/e2e2/fixture/kubeclient.go create mode 100644 test/e2e2/fixture/kubeconfig.go create mode 100644 test/e2e2/fixture_test.go create mode 100644 test/e2e2/sync_test.go create mode 100644 test/e2e2/test-env/.gitignore create mode 100644 test/e2e2/test-env/Procfile create mode 100644 test/e2e2/test-env/agent-autonomous/argocd-secret.yaml create mode 100644 test/e2e2/test-env/agent-autonomous/kustomization.yaml create mode 100644 test/e2e2/test-env/agent-managed/argocd-cmd-params-cm.yaml create mode 100644 test/e2e2/test-env/agent-managed/argocd-secret.yaml create mode 100644 test/e2e2/test-env/agent-managed/kustomization.yaml create mode 100644 test/e2e2/test-env/apps/autonomous-guestbook.yaml create mode 100644 test/e2e2/test-env/apps/managed-guestbook.yaml create mode 100755 test/e2e2/test-env/clean-apps.sh create mode 100644 test/e2e2/test-env/common/default-project.yaml create mode 100644 test/e2e2/test-env/common/kustomization.yaml create mode 100644 test/e2e2/test-env/common/redis-secret.yaml create mode 100644 test/e2e2/test-env/control-plane/appproject-default.yaml create mode 100644 test/e2e2/test-env/control-plane/argocd-cmd-params-cm.yaml create mode 100644 test/e2e2/test-env/control-plane/argocd-secret.yaml create mode 100644 test/e2e2/test-env/control-plane/kustomization.yaml create mode 100644 
test/e2e2/test-env/control-plane/redis-service.yaml create mode 100644 test/e2e2/test-env/control-plane/repo-server-service.yaml create mode 100644 test/e2e2/test-env/control-plane/server-service.yaml create mode 100755 test/e2e2/test-env/gen-creds.sh create mode 100644 test/e2e2/test-env/resources/metallb-ipaddresspool.yaml create mode 100644 test/e2e2/test-env/resources/scc-anyuid-seccomp-netbind.yaml create mode 100644 test/e2e2/test-env/resources/vcluster.yaml create mode 100755 test/e2e2/test-env/setup-vcluster-env.sh create mode 100755 test/e2e2/test-env/start-agent-autonomous.sh create mode 100755 test/e2e2/test-env/start-agent-managed.sh create mode 100755 test/e2e2/test-env/start-principal.sh diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 350c762..1053335 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -3,6 +3,7 @@ on: push: branches: - 'main' + - 'gitops-5159-e2e-test-framework' pull_request: branches: - 'main' @@ -14,6 +15,26 @@ permissions: contents: read jobs: + changes: + runs-on: ubuntu-latest + outputs: + code: ${{ steps.filter.outputs.code_any_changed }} + docs: ${{ steps.filter.outputs.docs_any_changed }} + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: tj-actions/changed-files@c3a1bb2c992d77180ae65be6ae6c166cf40f857c # v45.0.3 + id: filter + with: + files_yaml: | + code: + - '!**.md' + - '!**/*.md' + - '!docs/**' + - '!Dockerfile.*' + - '!hack/**' + docs: + - 'docs/**' + check-go: name: Check go modules synchronicity runs-on: ubuntu-22.04 @@ -58,7 +79,10 @@ jobs: pull-requests: read # for golangci/golangci-lint-action to fetch pull requests checks: write name: Lint Go code + if: ${{ needs.changes.outputs.code == 'true' }} runs-on: ubuntu-22.04 + needs: + - changes steps: - name: Checkout code uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 @@ -75,7 +99,10 @@ jobs: build-go: name: Build & cache Go code + if: ${{ needs.changes.outputs.code == 'true' }} runs-on: ubuntu-22.04 + needs: + - changes steps: - name: Checkout code uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 @@ -91,7 +118,10 @@ jobs: test: name: Run unit tests + if: ${{ needs.changes.outputs.code == 'true' }} runs-on: ubuntu-22.04 + needs: + - changes steps: - name: Checkout code uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 @@ -110,4 +140,96 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} file: test/out/coverage.out - + test-e2e: + name: Run end-to-end tests + if: ${{ needs.changes.outputs.code == 'true' }} + runs-on: ubuntu-22.04 + needs: + - changes + env: + GOPATH: /home/runner/go + steps: + - name: Install required packages + run: | + sudo apt-get install libpwquality-tools + - name: Checkout code + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Setup Golang + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + with: + go-version: ${{ env.GOLANG_VERSION }} + - name: GH actions workaround - Kill XSP4 process + run: | + sudo pkill mono || true + - name: Install microk8s + run: | + set -x + sudo snap install --classic microk8s + sudo microk8s enable metallb:192.168.56.100-192.168.56.254 + sudo microk8s enable hostpath-storage + mkdir -p $HOME/.kube + sudo microk8s config > $HOME/.kube/config + sudo chown runner $HOME/.kube/config + sudo chmod go-r $HOME/.kube/config + kubectl version + - name: Restore go build cache + uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # 
v4.0.0 + with: + path: ~/.cache/go-build + key: ${{ runner.os }}-go-build-v1-${{ github.run_id }} + - name: Add ~/go/bin to PATH + run: | + echo "/home/runner/go/bin" >> $GITHUB_PATH + - name: Add /usr/local/bin to PATH + run: | + echo "/usr/local/bin" >> $GITHUB_PATH + - name: Install vcluster + run: | + curl -L -o vcluster "https://github.com/loft-sh/vcluster/releases/latest/download/vcluster-linux-amd64" && sudo install -c -m 0755 vcluster /usr/local/bin && rm -f vcluster + vcluster --version + - name: Download Go dependencies + run: | + go mod download + go install github.com/mattn/goreman@latest + - name: Set up the test environment + run: | + make setup-e2e2 + - name: Run the principal and agents + run: | + make start-argocd-agent 2>&1 | sed -r "s/[[:cntrl:]]\[[0-9]{1,3}m//g" > /tmp/e2e-argocd-agent.log & + sleep 10 + - name: Run the e2e tests + run: | + set -o pipefail + make test-e2e2 2>&1 | tee /tmp/test-e2e.log + goreman run stop-all || echo "goreman trouble" + sleep 30 + - name: Create Argo CD logs + run: | + kubectl --context vcluster-agent-autonomous logs -n argocd argocd-application-controller-0 > /tmp/vcluster-agent-autonomous-controller.log + kubectl --context vcluster-agent-managed logs -n argocd argocd-application-controller-0 > /tmp/vcluster-agent-managed-controller.log + if: ${{ failure() }} + - name: Upload e2e-argocd-agent logs + uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 + with: + name: e2e-argocd-agent.log + path: /tmp/e2e-argocd-agent.log + if: ${{ failure() }} + - name: Upload test logs + uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 + with: + name: test-e2e.log + path: /tmp/test-e2e.log + if: ${{ failure() }} + - name: Upload vcluster-agent-autonomous-controller logs + uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 + with: + name: vcluster-agent-autonomous-controller.log + path: /tmp/vcluster-agent-autonomous-controller.log + if: ${{ failure() }} + - name: Upload vcluster-agent-managed-controller logs + uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 + with: + name: vcluster-agent-managed-controller.log + path: /tmp/vcluster-agent-managed-controller.log + if: ${{ failure() }} diff --git a/Makefile b/Makefile index fce4611..d6c20fc 100644 --- a/Makefile +++ b/Makefile @@ -24,6 +24,19 @@ MOCKERY_V2_VERSION?=v2.43.0 .PHONY: build build: agent principal +.PHONY: setup-e2e2 +setup-e2e2: + test/e2e2/test-env/setup-vcluster-env.sh create + +.PHONY: start-argocd-agent +start-argocd-agent: + test/e2e2/test-env/gen-creds.sh + goreman -f test/e2e2/test-env/Procfile start + +.PHONY: test-e2e2 +test-e2e2: + go test -count=1 -v -race -timeout 30m github.com/argoproj-labs/argocd-agent/test/e2e2 + .PHONY: test test: mkdir -p test/out diff --git a/test/e2e2/README.md b/test/e2e2/README.md new file mode 100644 index 0000000..1ea7da2 --- /dev/null +++ b/test/e2e2/README.md @@ -0,0 +1,69 @@ +# Running the end-to-end tests locally + +## Setup + +The e2e test scripts require [vcluster](https://github.com/loft-sh/vcluster) to be installed on your system. They also require an administrative connection to a host cluster. + +**Warning** Don't run these scripts against a cluster that you care about, there is no guarantee they won't break the cluster in some way. 
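Before running the setup scripts, it can help to sanity-check these prerequisites first. The commands below are only a suggested sketch; they assume `vcluster` and `kubectl` are already on your PATH and that your current kube context points at the disposable host cluster you intend to use:

```shell
# Confirm the vcluster CLI is installed and report its version
vcluster --version

# Show which cluster the scripts will talk to; it should be a throwaway cluster
kubectl config current-context

# Check that the connection is an administrative one
kubectl auth can-i '*' '*' --all-namespaces
```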
+ The scripts use `vcluster` to create three virtual clusters on the host cluster: + +* vcluster-control-plane - For hosting the control plane and principal +* vcluster-agent-managed - A cluster with agent in managed mode +* vcluster-agent-autonomous - A cluster with agent in autonomous mode + +The scripts will install Argo CD on each of those vclusters, in varying degrees of completeness. + +Both the vcluster and Argo CD installations require that LoadBalancer functionality is available on the host cluster. + +## Running the tests + +To set up the test environment on the cluster, execute the following command from the repository root: + +```shell +make setup-e2e2 +``` + +To run the principal and agents, execute the following command from the repository root: + +```shell +make start-argocd-agent +``` + +To run the tests, execute the following command from the repository root in a separate terminal instance: + +```shell +make test-e2e2 +``` + +# Writing new end-to-end tests + +There is some helper code in the `fixture` subdirectory. The tests use the [stretchr/testify](https://github.com/stretchr/testify) test framework. New tests should be created as part of a test suite, either an existing one or, preferably, as part of a new one. + +A new test suite should embed the `fixture.BaseSuite` struct, which will provide some automatic setup and teardown functionality for the suite. + +```go +type MyTestSuite struct { + fixture.BaseSuite +} +``` + +This will configure your suite with a `context.Context` as well as three `kubernetes clients`, one for the principal vcluster, one for the managed-agent vcluster, and one for the autonomous-agent vcluster. This is implemented in the `SetupSuite()` method, which has been defined on the BaseSuite. If your suite does not need its own `SetupSuite()` method, the one from BaseSuite will be used automatically. If you do need to specify a `SetupSuite()` method for your own suite, be sure to call the BaseSuite's method as the first thing. + +```go +func (suite *MyTestSuite) SetupSuite() { + suite.BaseSuite.SetupSuite() + ... +} +``` + +The BaseSuite also defines the `SetupTest()` and `TearDownTest()` methods to perform cleanup. If your suite does not need its own version of these methods, the ones from BaseSuite will be used automatically. If you do need to specify one of these methods for your own suite, be sure to call the BaseSuite's method as the first thing. + +```go +func (suite *MyTestSuite) TearDownTest() { + suite.BaseSuite.TearDownTest() + ... +} +``` + +The kubernetes client is a wrapper around `client-go/dynamic`. It is able to access the ArgoCD types as well as the types from `k8s.io/api/core/v1`, `k8s.io/api/apps/v1`, and `k8s.io/api/rbac/v1`. If you need support for additional types, you can add them to the scheme used in the `NewKubeClient` function in `fixture/kubeclient.go`. diff --git a/test/e2e2/basic_test.go b/test/e2e2/basic_test.go new file mode 100644 index 0000000..5235e34 --- /dev/null +++ b/test/e2e2/basic_test.go @@ -0,0 +1,140 @@ +// Copyright 2024 The argocd-agent Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package e2e2 + +import ( + "testing" + "time" + + "github.com/argoproj-labs/argocd-agent/test/e2e2/fixture" + argoapp "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/stretchr/testify/suite" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +type BasicTestSuite struct { + fixture.BaseSuite +} + +func (suite *BasicTestSuite) Test_Agent_Managed() { + requires := suite.Require() + + // Create a managed application in the principal's cluster + app := argoapp.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: "guestbook", + Namespace: "agent-managed", + }, + Spec: argoapp.ApplicationSpec{ + Project: "default", + Source: &argoapp.ApplicationSource{ + RepoURL: "https://github.com/argoproj/argocd-example-apps", + TargetRevision: "HEAD", + Path: "kustomize-guestbook", + }, + Destination: argoapp.ApplicationDestination{ + Server: "https://kubernetes.default.svc", + Namespace: "guestbook", + }, + SyncPolicy: &argoapp.SyncPolicy{ + SyncOptions: argoapp.SyncOptions{ + "CreateNamespace=true", + }, + }, + }, + } + err := suite.PrincipalClient.Create(suite.Ctx, &app, metav1.CreateOptions{}) + requires.NoError(err) + + key := fixture.ToNamespacedName(&app) + + // Ensure the app has been pushed to the managed-agent + requires.Eventually(func() bool { + app := argoapp.Application{} + err := suite.ManagedAgentClient.Get(suite.Ctx, key, &app, metav1.GetOptions{}) + return err == nil + }, 30*time.Second, 1*time.Second) + + // Delete the app from the principal + err = suite.PrincipalClient.Delete(suite.Ctx, &app, metav1.DeleteOptions{}) + requires.NoError(err) + + // Ensure the app has been deleted from the managed-agent + requires.Eventually(func() bool { + app := argoapp.Application{} + err := suite.ManagedAgentClient.Get(suite.Ctx, key, &app, metav1.GetOptions{}) + return errors.IsNotFound(err) + }, 30*time.Second, 1*time.Second) +} + +func (suite *BasicTestSuite) Test_Agent_Autonomous() { + requires := suite.Require() + + // Create an autonomous application on the autonomous-agent's cluster + app := argoapp.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: "guestbook", + Namespace: "argocd", + Finalizers: []string{ + "resources-finalizer.argocd.argoproj.io", + }, + }, + Spec: argoapp.ApplicationSpec{ + Project: "default", + Source: &argoapp.ApplicationSource{ + RepoURL: "https://github.com/argoproj/argocd-example-apps", + TargetRevision: "HEAD", + Path: "kustomize-guestbook", + }, + Destination: argoapp.ApplicationDestination{ + Server: "https://kubernetes.default.svc", + Namespace: "guestbook", + }, + SyncPolicy: &argoapp.SyncPolicy{ + SyncOptions: argoapp.SyncOptions{ + "CreateNamespace=true", + }, + }, + }, + } + err := suite.AutonomousAgentClient.Create(suite.Ctx, &app, metav1.CreateOptions{}) + requires.NoError(err) + + key := types.NamespacedName{Name: app.Name, Namespace: "agent-autonomous"} + + // Ensure the app has been pushed to the principal + requires.Eventually(func() bool { + app := argoapp.Application{} + err := suite.PrincipalClient.Get(suite.Ctx, key, &app, metav1.GetOptions{}) + return err == nil + }, 30*time.Second, 1*time.Second) + + // Delete the app from the autonomous-agent + err = suite.AutonomousAgentClient.Delete(suite.Ctx, &app, metav1.DeleteOptions{}) + requires.NoError(err) + + // Ensure the app has been deleted from the principal + requires.Eventually(func() 
bool { + app := argoapp.Application{} + err := suite.PrincipalClient.Get(suite.Ctx, key, &app, metav1.GetOptions{}) + return errors.IsNotFound(err) + }, 30*time.Second, 1*time.Second) +} + +func TestBasicTestSuite(t *testing.T) { + suite.Run(t, new(BasicTestSuite)) +} diff --git a/test/e2e2/fixture/argosync.go b/test/e2e2/fixture/argosync.go new file mode 100644 index 0000000..befb502 --- /dev/null +++ b/test/e2e2/fixture/argosync.go @@ -0,0 +1,53 @@ +// Copyright 2024 The argocd-agent Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fixture + +import ( + "context" + + argoapp "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +// SyncApplication syncs the named application using the "hook" strategy +func SyncApplication(ctx context.Context, appKey types.NamespacedName, kclient KubeClient) error { + operation := argoapp.Operation{ + Sync: &argoapp.SyncOperation{ + SyncStrategy: &argoapp.SyncStrategy{ + Hook: &argoapp.SyncStrategyHook{}, + }, + }, + InitiatedBy: argoapp.OperationInitiator{ + Username: "e2e", + }, + } + err := SyncApplicationWithOperation(ctx, appKey, operation, kclient) + return err +} + +// SyncApplicationWithOperation syncs the named application using the provided operation +func SyncApplicationWithOperation(ctx context.Context, appKey types.NamespacedName, operation argoapp.Operation, kclient KubeClient) error { + var err error + var app argoapp.Application + + err = kclient.Get(ctx, appKey, &app, metav1.GetOptions{}) + if err != nil { + return err + } + app.Operation = &operation + err = kclient.Update(ctx, &app, metav1.UpdateOptions{}) + return err +} diff --git a/test/e2e2/fixture/fixture.go b/test/e2e2/fixture/fixture.go new file mode 100644 index 0000000..a7c005a --- /dev/null +++ b/test/e2e2/fixture/fixture.go @@ -0,0 +1,153 @@ +// Copyright 2024 The argocd-agent Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package fixture + +import ( + "context" + "fmt" + "time" + + argoapp "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/stretchr/testify/suite" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +type BaseSuite struct { + suite.Suite + Ctx context.Context + PrincipalClient KubeClient + ManagedAgentClient KubeClient + AutonomousAgentClient KubeClient +} + +func (suite *BaseSuite) SetupSuite() { + requires := suite.Require() + + suite.Ctx = context.Background() + + config, err := GetSystemKubeConfig("vcluster-control-plane") + requires.Nil(err) + suite.PrincipalClient, err = NewKubeClient(config) + requires.Nil(err) + + config, err = GetSystemKubeConfig("vcluster-agent-managed") + requires.Nil(err) + suite.ManagedAgentClient, err = NewKubeClient(config) + requires.Nil(err) + + config, err = GetSystemKubeConfig("vcluster-agent-autonomous") + requires.Nil(err) + suite.AutonomousAgentClient, err = NewKubeClient(config) + requires.Nil(err) +} + +func (suite *BaseSuite) SetupTest() { + err := CleanUp(suite.Ctx, suite.PrincipalClient, suite.ManagedAgentClient, suite.AutonomousAgentClient) + suite.Assert().Nil(err) + suite.T().Logf("Test begun at: %v", time.Now()) +} + +func (suite *BaseSuite) TearDownTest() { + suite.T().Logf("Test ended at: %v", time.Now()) + err := CleanUp(suite.Ctx, suite.PrincipalClient, suite.ManagedAgentClient, suite.AutonomousAgentClient) + suite.Assert().Nil(err) +} + +func ensureDeletion(ctx context.Context, kclient KubeClient, app argoapp.Application) error { + err := kclient.Delete(ctx, &app, metav1.DeleteOptions{}) + if errors.IsNotFound(err) { + // application is already deleted + return nil + } else if err != nil { + return err + } + + key := types.NamespacedName{Name: app.Name, Namespace: app.Namespace} + for count := 0; count < 120; count++ { + app := argoapp.Application{} + err := kclient.Get(ctx, key, &app, metav1.GetOptions{}) + if errors.IsNotFound(err) { + return nil + } else if err == nil { + time.Sleep(1 * time.Second) + } else { + return err + } + } + + return fmt.Errorf("ensureDeletion: timeout waiting for deletion of %s/%s", key.Namespace, key.Name) +} + +func CleanUp(ctx context.Context, principalClient KubeClient, managedAgentClient KubeClient, autonomousAgentClient KubeClient) error { + + var list argoapp.ApplicationList + var err error + + // Delete all managed applications from the principal + list = argoapp.ApplicationList{} + err = principalClient.List(ctx, "agent-managed", &list, metav1.ListOptions{}) + if err != nil { + return err + } + for _, app := range list.Items { + err = ensureDeletion(ctx, principalClient, app) + if err != nil { + return err + } + } + + // Delete all applications from the autonomous agent + list = argoapp.ApplicationList{} + err = autonomousAgentClient.List(ctx, "argocd", &list, metav1.ListOptions{}) + if err != nil { + return err + } + for _, app := range list.Items { + err = ensureDeletion(ctx, autonomousAgentClient, app) + if err != nil { + return err + } + } + + // Delete any remaining managed applications left on the managed agent + list = argoapp.ApplicationList{} + err = managedAgentClient.List(ctx, "agent-managed", &list, metav1.ListOptions{}) + if err != nil { + return err + } + for _, app := range list.Items { + err = ensureDeletion(ctx, managedAgentClient, app) + if err != nil { + return err + } + } + + // Delete any remaining autonomous applications left on the principal + list = argoapp.ApplicationList{} + 
err = principalClient.List(ctx, "agent-autonomous", &list, metav1.ListOptions{}) + if err != nil { + return err + } + for _, app := range list.Items { + err = ensureDeletion(ctx, principalClient, app) + if err != nil { + return err + } + } + + return nil +} diff --git a/test/e2e2/fixture/kubeclient.go b/test/e2e2/fixture/kubeclient.go new file mode 100644 index 0000000..0b0c5da --- /dev/null +++ b/test/e2e2/fixture/kubeclient.go @@ -0,0 +1,271 @@ +// Copyright 2024 The argocd-agent Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fixture + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + "strings" + + argoapp "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + apps "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" +) + +type KubeObject interface { + metav1.Object + runtime.Object +} + +type KubeObjectList interface { + metav1.ListInterface + runtime.Object +} + +func ToNamespacedName(object KubeObject) types.NamespacedName { + return types.NamespacedName{Name: object.GetName(), Namespace: object.GetNamespace()} +} + +type KubeClient struct { + config *rest.Config + scheme *runtime.Scheme + dclient *dynamic.DynamicClient + mapper meta.RESTMapper + typeToResource map[reflect.Type]schema.GroupVersionResource +} + +func NewKubeClient(config *rest.Config) (KubeClient, error) { + var kclient KubeClient + + scheme := runtime.NewScheme() + err := argoapp.AddToScheme(scheme) + if err != nil { + return kclient, err + } + err = corev1.AddToScheme(scheme) + if err != nil { + return kclient, err + } + err = apps.AddToScheme(scheme) + if err != nil { + return kclient, err + } + err = rbacv1.AddToScheme(scheme) + if err != nil { + return kclient, err + } + + discoveryClient, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + return kclient, err + } + + groupResources, err := restmapper.GetAPIGroupResources(discoveryClient) + if err != nil { + return kclient, err + } + + dclient, err := dynamic.NewForConfig(config) + if err != nil { + return kclient, err + } + + mapper := restmapper.NewDiscoveryRESTMapper(groupResources) + + typeToResource := make(map[reflect.Type]schema.GroupVersionResource) + + return KubeClient{ + config, + scheme, + dclient, + mapper, + typeToResource, + }, nil +} + +func (c KubeClient) Get(ctx context.Context, key types.NamespacedName, object KubeObject, options metav1.GetOptions) error { + resource, err := c.resourceFor(object) + if err != nil { + return err + } + + result, err := c.dclient.Resource(resource).Namespace(key.Namespace).Get(ctx, key.Name, options) + if err != nil { + return err + } + + // Can't use the 
following code because it produces a panic for an ArgoCD Application type + // panic: reflect: reflect.Value.Set using value obtained using unexported field + // err = runtime.DefaultUnstructuredConverter.FromUnstructured(result.UnstructuredContent(), object) + + b, err := result.MarshalJSON() + if err != nil { + return err + } + err = json.Unmarshal(b, object) + return err +} + +func (c KubeClient) List(ctx context.Context, namespace string, list KubeObjectList, options metav1.ListOptions) error { + resource, err := c.resourceFor(list) + if err != nil { + return err + } + + ulist, err := c.dclient.Resource(resource).Namespace(namespace).List(ctx, options) + if err != nil { + return err + } + + b, err := ulist.MarshalJSON() + if err != nil { + return err + } + err = json.Unmarshal(b, list) + return err +} + +func (c KubeClient) Create(ctx context.Context, object KubeObject, options metav1.CreateOptions) error { + resource, err := c.resourceFor(object) + if err != nil { + return err + } + + objectKind := object.GetObjectKind() + if len(objectKind.GroupVersionKind().Group) == 0 { + gvks, _, err := c.scheme.ObjectKinds(object) + if err != nil { + return err + } + object.GetObjectKind().SetGroupVersionKind(gvks[0]) + } + + obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(object) + if err != nil { + return err + } + result, err := c.dclient.Resource(resource).Namespace(object.GetNamespace()).Create(ctx, &unstructured.Unstructured{Object: obj}, options) + if err != nil { + return err + } + + b, err := result.MarshalJSON() + if err != nil { + return err + } + + err = json.Unmarshal(b, object) + return err +} + +func (c KubeClient) Update(ctx context.Context, object KubeObject, options metav1.UpdateOptions) error { + resource, err := c.resourceFor(object) + if err != nil { + return err + } + + obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(object) + if err != nil { + return err + } + result, err := c.dclient.Resource(resource).Namespace(object.GetNamespace()).Update(ctx, &unstructured.Unstructured{Object: obj}, options) + if err != nil { + return err + } + + b, err := result.MarshalJSON() + if err != nil { + return err + } + err = json.Unmarshal(b, object) + return err +} + +func (c KubeClient) Patch(ctx context.Context, object KubeObject, jsonPatch []interface{}, options metav1.PatchOptions) error { + resource, err := c.resourceFor(object) + if err != nil { + return err + } + + payload, err := json.Marshal(jsonPatch) + if err != nil { + return err + } + + result, err := c.dclient.Resource(resource).Namespace(object.GetNamespace()).Patch(ctx, object.GetName(), types.JSONPatchType, payload, options) + if err != nil { + return err + } + + b, err := result.MarshalJSON() + if err != nil { + return err + } + err = json.Unmarshal(b, object) + return err +} + +func (c KubeClient) Delete(ctx context.Context, object KubeObject, options metav1.DeleteOptions) error { + resource, err := c.resourceFor(object) + if err != nil { + return err + } + + err = c.dclient.Resource(resource).Namespace(object.GetNamespace()).Delete(ctx, object.GetName(), options) + return err +} + +func (c KubeClient) resourceFor(object runtime.Object) (schema.GroupVersionResource, error) { + objectType := reflect.TypeOf(object) + resource, found := c.typeToResource[objectType] + if !found { + gvks, _, err := c.scheme.ObjectKinds(object) + if err != nil { + return resource, err + } + if len(gvks) == 0 { + return resource, fmt.Errorf("got no GroupVersionKind values for type %T", object) + } + + gvk := 
gvks[0] + + // for a list, we want to return the resource of the items + if strings.HasSuffix(gvk.Kind, "List") && meta.IsListType(object) { + gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] + } + + mapping, err := c.mapper.RESTMapping(gvk.GroupKind()) + if err != nil { + return resource, err + } + + resource = mapping.Resource + c.typeToResource[objectType] = resource + } + return resource, nil +} diff --git a/test/e2e2/fixture/kubeconfig.go b/test/e2e2/fixture/kubeconfig.go new file mode 100644 index 0000000..6ec4386 --- /dev/null +++ b/test/e2e2/fixture/kubeconfig.go @@ -0,0 +1,58 @@ +// Copyright 2024 The argocd-agent Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fixture + +import ( + "os" + + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +// GetSystemKubeConfig retrieves the given kube context from system-level +// Kubernetes config (e.g. ~/.kube/config). Use the empty string to retrieve +// the default context. +func GetSystemKubeConfig(kcontext string) (*rest.Config, error) { + + overrides := clientcmd.ConfigOverrides{} + if len(kcontext) > 0 { + overrides.CurrentContext = kcontext + } + + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + clientConfig := clientcmd.NewInteractiveDeferredLoadingClientConfig(loadingRules, &overrides, os.Stdin) + + restConfig, err := clientConfig.ClientConfig() + if err != nil { + return nil, err + } + + err = setRateLimitOnRestConfig(restConfig) + if err != nil { + return nil, err + } + + return restConfig, nil +} + +// setRateLimitOnRestConfig sets the QPS and Burst for the rest config +func setRateLimitOnRestConfig(restConfig *rest.Config) error { + if restConfig != nil { + // Prevent rate limiting of our requests + restConfig.QPS = 100 + restConfig.Burst = 250 + } + return nil +} diff --git a/test/e2e2/fixture_test.go b/test/e2e2/fixture_test.go new file mode 100644 index 0000000..54bb827 --- /dev/null +++ b/test/e2e2/fixture_test.go @@ -0,0 +1,543 @@ +// Copyright 2024 The argocd-agent Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package e2e2 + +import ( + "context" + "encoding/json" + "testing" + + "github.com/argoproj-labs/argocd-agent/test/e2e2/fixture" + argoapp "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/stretchr/testify/suite" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/restmapper" +) + +type FixtureTestSuite struct { + suite.Suite +} + +func (suite *FixtureTestSuite) SetupSuite() { + requires := suite.Require() + + ctx := context.Background() + + config, err := fixture.GetSystemKubeConfig("") + requires.NoError(err) + kclient, err := fixture.NewKubeClient(config) + requires.NoError(err) + + namespace := corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-argocd-agent", + }, + } + err = kclient.Create(ctx, &namespace, metav1.CreateOptions{}) + requires.NoError(err) + + app := argoapp.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: "guestbook", + Namespace: "test-argocd-agent", + }, + Spec: argoapp.ApplicationSpec{ + Source: &argoapp.ApplicationSource{ + RepoURL: "https://github.com/argoproj/argocd-example-apps", + TargetRevision: "HEAD", + Path: "kustomize-guestbook", + }, + Destination: argoapp.ApplicationDestination{ + Server: "https://kubernetes.default.svc", + Namespace: "foo", + }, + }, + } + err = kclient.Create(ctx, &app, metav1.CreateOptions{}) + requires.NoError(err) + + app = argoapp.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: "guestbook1", + Namespace: "test-argocd-agent", + }, + Spec: argoapp.ApplicationSpec{ + Source: &argoapp.ApplicationSource{ + RepoURL: "https://github.com/argoproj/argocd-example-apps", + TargetRevision: "HEAD", + Path: "kustomize-guestbook", + }, + Destination: argoapp.ApplicationDestination{ + Server: "https://kubernetes.default.svc", + Namespace: "foo", + }, + }, + } + err = kclient.Create(ctx, &app, metav1.CreateOptions{}) + requires.NoError(err) + + app = argoapp.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: "guestbook2", + Namespace: "test-argocd-agent", + }, + Spec: argoapp.ApplicationSpec{ + Source: &argoapp.ApplicationSource{ + RepoURL: "https://github.com/argoproj/argocd-example-apps", + TargetRevision: "HEAD", + Path: "kustomize-guestbook", + }, + Destination: argoapp.ApplicationDestination{ + Server: "https://kubernetes.default.svc", + Namespace: "foo", + }, + }, + } + err = kclient.Create(ctx, &app, metav1.CreateOptions{}) + requires.NoError(err) +} + +func (suite *FixtureTestSuite) TearDownSuite() { + requires := suite.Require() + + ctx := context.Background() + + config, err := fixture.GetSystemKubeConfig("") + requires.NoError(err) + kclient, err := fixture.NewKubeClient(config) + requires.NoError(err) + + app := argoapp.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: "guestbook", + Namespace: "test-argocd-agent", + }, + } + err = kclient.Delete(ctx, &app, metav1.DeleteOptions{}) + requires.NoError(err) + + app = argoapp.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: "guestbook1", + Namespace: "test-argocd-agent", + }, + } + err = kclient.Delete(ctx, &app, metav1.DeleteOptions{}) + requires.NoError(err) + + app = argoapp.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: "guestbook2", + Namespace: "test-argocd-agent", + }, + } + err = kclient.Delete(ctx, &app, 
metav1.DeleteOptions{}) + requires.NoError(err) + + namespace := corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-argocd-agent", + }, + } + err = kclient.Delete(ctx, &namespace, metav1.DeleteOptions{}) + requires.NoError(err) +} + +func (suite *FixtureTestSuite) Test_Sanity() { + requires := suite.Require() + requires.True(true) +} + +func (suite *FixtureTestSuite) Test_KubeConfig() { + requires := suite.Require() + + config, err := fixture.GetSystemKubeConfig("") + requires.NoError(err) + requires.NotNil(config) + + kclient, err := kubernetes.NewForConfig(config) + requires.NoError(err) + requires.NotNil(kclient) +} + +func (suite *FixtureTestSuite) Test_Get_Application_Via_Dynamic() { + requires := suite.Require() + + config, err := fixture.GetSystemKubeConfig("") + requires.NoError(err) + + dclient, err := dynamic.NewForConfig(config) + requires.NoError(err) + + appResource := schema.GroupVersionResource{ + Group: "argoproj.io", + Version: "v1alpha1", + Resource: "applications", + } + + ctx := context.Background() + + unstructuredApp, err := dclient.Resource(appResource).Namespace("test-argocd-agent").Get(ctx, "guestbook", metav1.GetOptions{}) + requires.NoError(err) + + b, err := unstructuredApp.MarshalJSON() + requires.NoError(err) + requires.NotNil(b) + app := argoapp.Application{} + err = json.Unmarshal(b, &app) + requires.NoError(err) + requires.Equal("Application", app.Kind) +} + +func (suite *FixtureTestSuite) Test_List_Application_Via_Dynamic() { + requires := suite.Require() + + config, err := fixture.GetSystemKubeConfig("") + requires.NoError(err) + + dclient, err := dynamic.NewForConfig(config) + requires.NoError(err) + + appResource := schema.GroupVersionResource{ + Group: "argoproj.io", + Version: "v1alpha1", + Resource: "applications", + } + + ctx := context.Background() + + ulist, err := dclient.Resource(appResource).Namespace("test-argocd-agent").List(ctx, metav1.ListOptions{}) + requires.NoError(err) + + b, err := ulist.MarshalJSON() + requires.NoError(err) + requires.NotNil(b) + + list := argoapp.ApplicationList{} + err = json.Unmarshal(b, &list) + requires.NoError(err) + requires.NotEmpty(list) +} + +func (suite *FixtureTestSuite) Test_Get_Application_Via_RESTMapper() { + requires := suite.Require() + + ctx := context.Background() + + config, err := fixture.GetSystemKubeConfig("") + requires.NoError(err) + + scheme := runtime.NewScheme() + argoapp.AddToScheme(scheme) + + discoveryClient, err := discovery.NewDiscoveryClientForConfig(config) + requires.NoError(err) + groupResources, err := restmapper.GetAPIGroupResources(discoveryClient) + requires.NoError(err) + + mapper := restmapper.NewDiscoveryRESTMapper(groupResources) + + app := argoapp.Application{} + + gvks, unversioned, err := scheme.ObjectKinds(&app) + requires.NoError(err) + requires.False(unversioned) + requires.NotEmpty(gvks) + + mapping, err := mapper.RESTMapping(gvks[0].GroupKind()) + requires.NoError(err) + + requires.Equal("argoproj.io", mapping.Resource.Group) + requires.Equal("v1alpha1", mapping.Resource.Version) + requires.Equal("applications", mapping.Resource.Resource) + + dclient, err := dynamic.NewForConfig(config) + requires.NoError(err) + + unstructuredApp, err := dclient.Resource(mapping.Resource).Namespace("test-argocd-agent").Get(ctx, "guestbook", metav1.GetOptions{}) + requires.NoError(err) + + b, err := unstructuredApp.MarshalJSON() + requires.NoError(err) + requires.NotEmpty(b) + app = argoapp.Application{} + err = json.Unmarshal(b, &app) + requires.NoError(err) + 
requires.Equal("Application", app.Kind) +} + +func (suite *FixtureTestSuite) Test_Get() { + requires := suite.Require() + + ctx := context.Background() + + config, err := fixture.GetSystemKubeConfig("") + requires.NoError(err) + + kclient, err := fixture.NewKubeClient(config) + requires.NoError(err) + + app := argoapp.Application{} + err = kclient.Get(ctx, types.NamespacedName{Namespace: "test-argocd-agent", Name: "guestbook"}, &app, metav1.GetOptions{}) + requires.NoError(err) + requires.Equal("guestbook", app.Name) + requires.Equal("Application", app.Kind) + + app = argoapp.Application{} + err = kclient.Get(ctx, types.NamespacedName{Namespace: "test-argocd-agent", Name: "guestbook"}, &app, metav1.GetOptions{}) + requires.NoError(err) + requires.Equal("guestbook", app.Name) + requires.Equal("Application", app.Kind) + + ns := corev1.Namespace{} + err = kclient.Get(ctx, types.NamespacedName{Namespace: "", Name: "test-argocd-agent"}, &ns, metav1.GetOptions{}) + requires.NoError(err) + requires.Equal("test-argocd-agent", ns.Name) +} + +func (suite *FixtureTestSuite) Test_Create_Get_Delete_Application() { + requires := suite.Require() + + ctx := context.Background() + + config, err := fixture.GetSystemKubeConfig("") + requires.NoError(err) + + kclient, err := fixture.NewKubeClient(config) + requires.NoError(err) + + app := argoapp.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "test-argocd-agent", + }, + Spec: argoapp.ApplicationSpec{ + Destination: argoapp.ApplicationDestination{ + Server: "https://kubernetes.default.svc", + Namespace: "foo", + }, + }, + } + + err = kclient.Create(ctx, &app, metav1.CreateOptions{}) + requires.NoError(err) + requires.Equal("foo", app.Name) + requires.Equal("Application", app.Kind) + requires.NotEmpty(app.UID) + + app = argoapp.Application{} + err = kclient.Get(ctx, types.NamespacedName{Namespace: "test-argocd-agent", Name: "foo"}, &app, metav1.GetOptions{}) + requires.NoError(err) + requires.Equal("foo", app.Name) + requires.Equal("Application", app.Kind) + requires.NotEmpty(app.UID) + + app = argoapp.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "test-argocd-agent", + }, + } + err = kclient.Delete(ctx, &app, metav1.DeleteOptions{}) + requires.NoError(err) + + app = argoapp.Application{} + err = kclient.Get(ctx, types.NamespacedName{Namespace: "test-argocd-agent", Name: "foo"}, &app, metav1.GetOptions{}) + requires.NotNil(err) + requires.True(errors.IsNotFound(err)) +} + +func (suite *FixtureTestSuite) Test_Update_Application() { + requires := suite.Require() + + ctx := context.Background() + + config, err := fixture.GetSystemKubeConfig("") + requires.NoError(err) + + kclient, err := fixture.NewKubeClient(config) + requires.NoError(err) + + app := argoapp.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "test-argocd-agent", + }, + Spec: argoapp.ApplicationSpec{ + Source: &argoapp.ApplicationSource{ + RepoURL: "https://github.com/argoproj/argocd-example-apps", + TargetRevision: "HEAD", + Path: "kustomize-guestbook", + }, + Destination: argoapp.ApplicationDestination{ + Server: "https://kubernetes.default.svc", + Namespace: "foo", + }, + }, + } + err = kclient.Create(ctx, &app, metav1.CreateOptions{}) + requires.NoError(err) + requires.Equal("foo", app.Name) + requires.Equal("Application", app.Kind) + requires.Equal("HEAD", app.Spec.Source.TargetRevision) + requires.NotEmpty(app.UID) + + app.Spec.Source.TargetRevision = "TAIL" + err = kclient.Update(ctx, &app, metav1.UpdateOptions{}) 
+ requires.NoError(err) + requires.Equal("TAIL", app.Spec.Source.TargetRevision) + + app = argoapp.Application{} + err = kclient.Get(ctx, types.NamespacedName{Namespace: "test-argocd-agent", Name: "foo"}, &app, metav1.GetOptions{}) + requires.NoError(err) + requires.Equal("TAIL", app.Spec.Source.TargetRevision) + + err = kclient.Delete(ctx, &app, metav1.DeleteOptions{}) + requires.NoError(err) +} + +func (suite *FixtureTestSuite) Test_List_Applications() { + requires := suite.Require() + + ctx := context.Background() + + config, err := fixture.GetSystemKubeConfig("") + requires.NoError(err) + + kclient, err := fixture.NewKubeClient(config) + requires.NoError(err) + + list := argoapp.ApplicationList{} + err = kclient.List(ctx, "test-argocd-agent", &list, metav1.ListOptions{}) + requires.NoError(err) + requires.Len(list.Items, 3) +} + +func (suite *FixtureTestSuite) Test_Patch_Application() { + requires := suite.Require() + + ctx := context.Background() + + config, err := fixture.GetSystemKubeConfig("") + requires.NoError(err) + + kclient, err := fixture.NewKubeClient(config) + requires.NoError(err) + + app := argoapp.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "test-argocd-agent", + }, + Spec: argoapp.ApplicationSpec{ + Source: &argoapp.ApplicationSource{ + RepoURL: "https://github.com/argoproj/argocd-example-apps", + TargetRevision: "HEAD", + Path: "kustomize-guestbook", + }, + Destination: argoapp.ApplicationDestination{ + Server: "https://kubernetes.default.svc", + Namespace: "foo", + }, + }, + } + err = kclient.Create(ctx, &app, metav1.CreateOptions{}) + requires.NoError(err) + requires.Equal("foo", app.Name) + requires.Equal("Application", app.Kind) + requires.Equal("HEAD", app.Spec.Source.TargetRevision) + requires.NotEmpty(app.UID) + + app = argoapp.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "test-argocd-agent", + }, + } + err = kclient.Patch(ctx, &app, []interface{}{ + map[string]interface{}{ + "op": "replace", + "path": "/spec/source/targetRevision", + "value": "TAIL", + }, + }, metav1.PatchOptions{}) + requires.NoError(err) + requires.Equal("TAIL", app.Spec.Source.TargetRevision) + + app = argoapp.Application{} + err = kclient.Get(ctx, types.NamespacedName{Namespace: "test-argocd-agent", Name: "foo"}, &app, metav1.GetOptions{}) + requires.NoError(err) + requires.Equal("TAIL", app.Spec.Source.TargetRevision) + + err = kclient.Delete(ctx, &app, metav1.DeleteOptions{}) + requires.NoError(err) +} + +func (suite *FixtureTestSuite) Test_SyncApplication() { + requires := suite.Require() + + ctx := context.Background() + + config, err := fixture.GetSystemKubeConfig("") + requires.NoError(err) + + kclient, err := fixture.NewKubeClient(config) + requires.NoError(err) + + app := argoapp.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "test-argocd-agent", + }, + Spec: argoapp.ApplicationSpec{ + Source: &argoapp.ApplicationSource{ + RepoURL: "https://github.com/argoproj/argocd-example-apps", + TargetRevision: "HEAD", + Path: "kustomize-guestbook", + }, + Destination: argoapp.ApplicationDestination{ + Server: "https://kubernetes.default.svc", + Namespace: "foo", + }, + }, + } + err = kclient.Create(ctx, &app, metav1.CreateOptions{}) + requires.NoError(err) + + key := fixture.ToNamespacedName(&app) + + err = fixture.SyncApplication(ctx, key, kclient) + requires.NoError(err) + + app = argoapp.Application{} + err = kclient.Get(ctx, key, &app, metav1.GetOptions{}) + requires.NoError(err) + 
requires.NotNil(app.Operation) + + err = kclient.Delete(ctx, &app, metav1.DeleteOptions{}) + requires.NoError(err) +} + +func XTestFixtureTestSuite(t *testing.T) { + suite.Run(t, new(FixtureTestSuite)) +} diff --git a/test/e2e2/sync_test.go b/test/e2e2/sync_test.go new file mode 100644 index 0000000..a8df89b --- /dev/null +++ b/test/e2e2/sync_test.go @@ -0,0 +1,256 @@ +// Copyright 2024 The argocd-agent Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package e2e2 + +import ( + "testing" + "time" + + "github.com/argoproj-labs/argocd-agent/test/e2e2/fixture" + argoapp "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/stretchr/testify/suite" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +type SyncTestSuite struct { + fixture.BaseSuite +} + +func (suite *SyncTestSuite) SetupTest() { + suite.BaseSuite.SetupTest() + requires := suite.Require() + + var err error + var namespace corev1.Namespace + + // Create the "guestbook" namespace on the managed agent cluster + namespace = corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "guestbook", + }, + } + err = suite.ManagedAgentClient.Create(suite.Ctx, &namespace, metav1.CreateOptions{}) + requires.NoError(err) + + // Create the "guestbook" namespace on the autonomous agent cluster + namespace = corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "guestbook", + }, + } + err = suite.AutonomousAgentClient.Create(suite.Ctx, &namespace, metav1.CreateOptions{}) + requires.NoError(err) +} + +func (suite *SyncTestSuite) TearDownTest() { + suite.BaseSuite.TearDownTest() + requires := suite.Require() + + var err error + + // Delete the "guestbook" namespace from the managed agent and autonomous agent clusters + namespace := corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "guestbook", + }, + } + err = suite.ManagedAgentClient.Delete(suite.Ctx, &namespace, metav1.DeleteOptions{}) + requires.NoError(err) + err = suite.AutonomousAgentClient.Delete(suite.Ctx, &namespace, metav1.DeleteOptions{}) + requires.NoError(err) + + // Wait until the namespaces are actually gone from the clusters + requires.Eventually(func() bool { + namespace := corev1.Namespace{} + err := suite.ManagedAgentClient.Get(suite.Ctx, types.NamespacedName{Name: "guestbook"}, &namespace, metav1.GetOptions{}) + return errors.IsNotFound(err) + }, 30*time.Second, 1*time.Second) + requires.Eventually(func() bool { + namespace := corev1.Namespace{} + err := suite.AutonomousAgentClient.Get(suite.Ctx, types.NamespacedName{Name: "guestbook"}, &namespace, metav1.GetOptions{}) + return errors.IsNotFound(err) + }, 30*time.Second, 1*time.Second) + +} + +func (suite *SyncTestSuite) Test_Sync_Managed() { + requires := suite.Require() + + // Create a managed application in the principal's cluster + app := argoapp.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: "guestbook", + Namespace: "agent-managed", + }, + Spec: 
argoapp.ApplicationSpec{ + Project: "default", + Source: &argoapp.ApplicationSource{ + RepoURL: "https://github.com/argoproj/argocd-example-apps", + TargetRevision: "HEAD", + Path: "kustomize-guestbook", + }, + Destination: argoapp.ApplicationDestination{ + Server: "https://kubernetes.default.svc", + Namespace: "guestbook", + }, + SyncPolicy: &argoapp.SyncPolicy{ + SyncOptions: argoapp.SyncOptions{ + "CreateNamespace=true", + }, + }, + }, + } + err := suite.PrincipalClient.Create(suite.Ctx, &app, metav1.CreateOptions{}) + requires.NoError(err) + + key := fixture.ToNamespacedName(&app) + + // Ensure the app has been pushed to the managed-agent + requires.Eventually(func() bool { + app := argoapp.Application{} + err := suite.ManagedAgentClient.Get(suite.Ctx, key, &app, metav1.GetOptions{}) + return err == nil + }, 30*time.Second, 1*time.Second) + + // Check that the principal's sync status is "OutOfSync" + requires.Eventually(func() bool { + app = argoapp.Application{} + err = suite.PrincipalClient.Get(suite.Ctx, key, &app, metav1.GetOptions{}) + return err == nil && app.Status.Sync.Status == argoapp.SyncStatusCodeOutOfSync + }, 60*time.Second, 1*time.Second) + + // Sync the app + err = fixture.SyncApplication(suite.Ctx, key, suite.PrincipalClient) + requires.NoError(err) + + // Wait for the app on the principal to become synced + requires.Eventually(func() bool { + app := argoapp.Application{} + err := suite.PrincipalClient.Get(suite.Ctx, key, &app, metav1.GetOptions{}) + return err == nil && app.Status.Sync.Status == argoapp.SyncStatusCodeSynced + }, 60*time.Second, 1*time.Second) + + // Ensure the app on the managed-agent becomes synced + requires.Eventually(func() bool { + app := argoapp.Application{} + err := suite.ManagedAgentClient.Get(suite.Ctx, key, &app, metav1.GetOptions{}) + return err == nil && app.Status.Sync.Status == argoapp.SyncStatusCodeSynced + }, 60*time.Second, 1*time.Second) + + // Delete the app from the principal + err = suite.PrincipalClient.Delete(suite.Ctx, &app, metav1.DeleteOptions{}) + requires.NoError(err) + + // Ensure the app has been deleted from the managed-agent + requires.Eventually(func() bool { + app := argoapp.Application{} + err := suite.ManagedAgentClient.Get(suite.Ctx, key, &app, metav1.GetOptions{}) + return errors.IsNotFound(err) + }, 90*time.Second, 1*time.Second) +} + +func (suite *SyncTestSuite) Test_Sync_Autonomous() { + requires := suite.Require() + + // Create an autonomous application on the autonomous-agent's cluster + app := argoapp.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: "guestbook", + Namespace: "argocd", + Finalizers: []string{ + "resources-finalizer.argocd.argoproj.io", + }, + }, + Spec: argoapp.ApplicationSpec{ + Project: "default", + Source: &argoapp.ApplicationSource{ + RepoURL: "https://github.com/argoproj/argocd-example-apps", + TargetRevision: "HEAD", + Path: "kustomize-guestbook", + }, + Destination: argoapp.ApplicationDestination{ + Server: "https://kubernetes.default.svc", + Namespace: "guestbook", + }, + SyncPolicy: &argoapp.SyncPolicy{ + SyncOptions: argoapp.SyncOptions{ + "CreateNamespace=true", + }, + }, + }, + } + err := suite.AutonomousAgentClient.Create(suite.Ctx, &app, metav1.CreateOptions{}) + requires.NoError(err) + + agentKey := types.NamespacedName{Name: app.Name, Namespace: app.Namespace} + principalKey := types.NamespacedName{Name: app.Name, Namespace: "agent-autonomous"} + + // Ensure the app has been pushed to the principal + requires.Eventually(func() bool { + app := argoapp.Application{} + err 
:= suite.PrincipalClient.Get(suite.Ctx, principalKey, &app, metav1.GetOptions{}) + return err == nil + }, 30*time.Second, 1*time.Second) + + // Check that the autonomous-agent's sync status is "OutOfSync" + requires.Eventually(func() bool { + app = argoapp.Application{} + err = suite.AutonomousAgentClient.Get(suite.Ctx, agentKey, &app, metav1.GetOptions{}) + return err == nil && app.Status.Sync.Status == argoapp.SyncStatusCodeOutOfSync + }, 60*time.Second, 1*time.Second) + + // Sync the app + err = fixture.SyncApplication(suite.Ctx, agentKey, suite.AutonomousAgentClient) + requires.NoError(err) + + // Wait for the app on the autonomous-agent to become synced + requires.Eventually(func() bool { + app := argoapp.Application{} + err := suite.AutonomousAgentClient.Get(suite.Ctx, agentKey, &app, metav1.GetOptions{}) + return err == nil && app.Status.Sync.Status == argoapp.SyncStatusCodeSynced + }, 60*time.Second, 1*time.Second) + + // Ensure the app on the principal becomes synced + requires.Eventually(func() bool { + app := argoapp.Application{} + err := suite.PrincipalClient.Get(suite.Ctx, principalKey, &app, metav1.GetOptions{}) + return err == nil && app.Status.Sync.Status == argoapp.SyncStatusCodeSynced + }, 60*time.Second, 1*time.Second) + + // Delete the app from the autonomous-agent + err = suite.AutonomousAgentClient.Delete(suite.Ctx, &app, metav1.DeleteOptions{}) + requires.NoError(err) + + // Wait for the app to be deleted from the autonomous-agent + requires.Eventually(func() bool { + app := argoapp.Application{} + err := suite.AutonomousAgentClient.Get(suite.Ctx, agentKey, &app, metav1.GetOptions{}) + return errors.IsNotFound(err) + }, 90*time.Second, 1*time.Second) + + // Ensure the app has been deleted from the principal + requires.Eventually(func() bool { + app := argoapp.Application{} + err := suite.PrincipalClient.Get(suite.Ctx, principalKey, &app, metav1.GetOptions{}) + return errors.IsNotFound(err) + }, 30*time.Second, 1*time.Second) +} + +func TestSyncTestSuite(t *testing.T) { + suite.Run(t, new(SyncTestSuite)) +} diff --git a/test/e2e2/test-env/.gitignore b/test/e2e2/test-env/.gitignore new file mode 100644 index 0000000..99a84b3 --- /dev/null +++ b/test/e2e2/test-env/.gitignore @@ -0,0 +1 @@ +creds diff --git a/test/e2e2/test-env/Procfile b/test/e2e2/test-env/Procfile new file mode 100644 index 0000000..e6a282a --- /dev/null +++ b/test/e2e2/test-env/Procfile @@ -0,0 +1,3 @@ +principal: test/e2e2/test-env/start-principal.sh +agent-managed: test/e2e2/test-env/start-agent-managed.sh +agent-autonomous: test/e2e2/test-env/start-agent-autonomous.sh diff --git a/test/e2e2/test-env/agent-autonomous/argocd-secret.yaml b/test/e2e2/test-env/agent-autonomous/argocd-secret.yaml new file mode 100644 index 0000000..0cd0c6e --- /dev/null +++ b/test/e2e2/test-env/agent-autonomous/argocd-secret.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Secret +metadata: + name: argocd-secret diff --git a/test/e2e2/test-env/agent-autonomous/kustomization.yaml b/test/e2e2/test-env/agent-autonomous/kustomization.yaml new file mode 100644 index 0000000..8c6793a --- /dev/null +++ b/test/e2e2/test-env/agent-autonomous/kustomization.yaml @@ -0,0 +1,11 @@ +resources: +- https://github.com/argoproj/argo-cd/manifests/crds?ref=stable&timeout=240s +- https://github.com/argoproj/argo-cd/manifests/base/config?ref=stable&timeout=240s +- https://github.com/argoproj/argo-cd/manifests/base/redis?ref=stable&timeout=240s +- https://github.com/argoproj/argo-cd/manifests/base/repo-server?ref=stable&timeout=240s +- 
https://github.com/argoproj/argo-cd/manifests/base/application-controller?ref=stable&timeout=240s +- https://github.com/argoproj/argo-cd/manifests/cluster-rbac/application-controller?ref=stable&timeout=240s +- ../common + +patches: +- path: argocd-secret.yaml diff --git a/test/e2e2/test-env/agent-managed/argocd-cmd-params-cm.yaml b/test/e2e2/test-env/agent-managed/argocd-cmd-params-cm.yaml new file mode 100644 index 0000000..9a4a341 --- /dev/null +++ b/test/e2e2/test-env/agent-managed/argocd-cmd-params-cm.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-cmd-params-cm +data: + repo.server: repo-server-address:8081 + redis.server: redis-server-address:6379 + application.namespaces: agent-managed \ No newline at end of file diff --git a/test/e2e2/test-env/agent-managed/argocd-secret.yaml b/test/e2e2/test-env/agent-managed/argocd-secret.yaml new file mode 100644 index 0000000..0cd0c6e --- /dev/null +++ b/test/e2e2/test-env/agent-managed/argocd-secret.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Secret +metadata: + name: argocd-secret diff --git a/test/e2e2/test-env/agent-managed/kustomization.yaml b/test/e2e2/test-env/agent-managed/kustomization.yaml new file mode 100644 index 0000000..ae306ae --- /dev/null +++ b/test/e2e2/test-env/agent-managed/kustomization.yaml @@ -0,0 +1,12 @@ +resources: +- https://github.com/argoproj/argo-cd/manifests/crds?ref=stable&timeout=240s +- https://github.com/argoproj/argo-cd/manifests/base/config?ref=stable&timeout=240s +- https://github.com/argoproj/argo-cd/manifests/base/redis?ref=stable&timeout=240s +- https://github.com/argoproj/argo-cd/manifests/base/repo-server?ref=stable&timeout=240s +- https://github.com/argoproj/argo-cd/manifests/base/application-controller?ref=stable&timeout=240s +- https://github.com/argoproj/argo-cd/manifests/cluster-rbac/application-controller?ref=stable&timeout=240s +- ../common + +patches: +- path: argocd-cmd-params-cm.yaml +- path: argocd-secret.yaml diff --git a/test/e2e2/test-env/apps/autonomous-guestbook.yaml b/test/e2e2/test-env/apps/autonomous-guestbook.yaml new file mode 100644 index 0000000..2207d6b --- /dev/null +++ b/test/e2e2/test-env/apps/autonomous-guestbook.yaml @@ -0,0 +1,19 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: guestbook + namespace: argocd + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + project: default + source: + repoURL: https://github.com/argoproj/argocd-example-apps + targetRevision: HEAD + path: kustomize-guestbook + destination: + server: https://kubernetes.default.svc + namespace: guestbook + syncPolicy: + syncOptions: + - "CreateNamespace=true" diff --git a/test/e2e2/test-env/apps/managed-guestbook.yaml b/test/e2e2/test-env/apps/managed-guestbook.yaml new file mode 100644 index 0000000..bc0d53e --- /dev/null +++ b/test/e2e2/test-env/apps/managed-guestbook.yaml @@ -0,0 +1,17 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: guestbook + namespace: agent-managed +spec: + project: default + source: + repoURL: https://github.com/argoproj/argocd-example-apps + targetRevision: HEAD + path: kustomize-guestbook + destination: + server: https://kubernetes.default.svc + namespace: guestbook + syncPolicy: + syncOptions: + - "CreateNamespace=true" diff --git a/test/e2e2/test-env/clean-apps.sh b/test/e2e2/test-env/clean-apps.sh new file mode 100755 index 0000000..1321b94 --- /dev/null +++ b/test/e2e2/test-env/clean-apps.sh @@ -0,0 +1,30 @@ +# Copyright 2024 The argocd-agent Authors +# +# Licensed under the 
Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Script to clean all apps in the demo environment. It will remove any resource +# finalizer before deletion, so it might leave your workloads behind. +for cluster in control-plane agent-managed agent-autonomous; do + apps=$(kubectl --context vcluster-${cluster} get apps -A --no-headers -o go-template="{{range .items}}{{.metadata.namespace}} {{.metadata.name}}{{\"\n\"}}{{end}}") + test -z "$apps" && continue + OIFS="$IFS" + while IFS= read -r app; do + IFS=" " set -- $app + namespace="$1" + name="$2" + echo "Patching $namespace/$name in vcluster-${cluster}" + kubectl --context vcluster-${cluster} patch -n $namespace app $name -p '{"metadata":{"finalizers":null}}' --type=merge + done < <(echo "$apps") + echo "Deleting all apps in vcluster-${cluster}" + kubectl --context vcluster-${cluster} delete apps --all-namespaces --all +done diff --git a/test/e2e2/test-env/common/default-project.yaml b/test/e2e2/test-env/common/default-project.yaml new file mode 100644 index 0000000..e9b1953 --- /dev/null +++ b/test/e2e2/test-env/common/default-project.yaml @@ -0,0 +1,15 @@ +apiVersion: argoproj.io/v1alpha1 +kind: AppProject +metadata: + name: default +spec: + clusterResourceWhitelist: + - group: '*' + kind: '*' + destinations: + - namespace: '*' + server: '*' + sourceNamespaces: + - '*' + sourceRepos: + - '*' diff --git a/test/e2e2/test-env/common/kustomization.yaml b/test/e2e2/test-env/common/kustomization.yaml new file mode 100644 index 0000000..9d4551f --- /dev/null +++ b/test/e2e2/test-env/common/kustomization.yaml @@ -0,0 +1,3 @@ +resources: +- default-project.yaml +- redis-secret.yaml diff --git a/test/e2e2/test-env/common/redis-secret.yaml b/test/e2e2/test-env/common/redis-secret.yaml new file mode 100644 index 0000000..561af7d --- /dev/null +++ b/test/e2e2/test-env/common/redis-secret.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +data: + auth: a3BReU5ZLWpjaGU3RUJ1Vw== +kind: Secret +metadata: + name: argocd-redis +type: Opaque diff --git a/test/e2e2/test-env/control-plane/appproject-default.yaml b/test/e2e2/test-env/control-plane/appproject-default.yaml new file mode 100644 index 0000000..e9b1953 --- /dev/null +++ b/test/e2e2/test-env/control-plane/appproject-default.yaml @@ -0,0 +1,15 @@ +apiVersion: argoproj.io/v1alpha1 +kind: AppProject +metadata: + name: default +spec: + clusterResourceWhitelist: + - group: '*' + kind: '*' + destinations: + - namespace: '*' + server: '*' + sourceNamespaces: + - '*' + sourceRepos: + - '*' diff --git a/test/e2e2/test-env/control-plane/argocd-cmd-params-cm.yaml b/test/e2e2/test-env/control-plane/argocd-cmd-params-cm.yaml new file mode 100644 index 0000000..e293e84 --- /dev/null +++ b/test/e2e2/test-env/control-plane/argocd-cmd-params-cm.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-cmd-params-cm +data: + application.namespaces: "*" diff --git a/test/e2e2/test-env/control-plane/argocd-secret.yaml b/test/e2e2/test-env/control-plane/argocd-secret.yaml new file mode 100644 index 0000000..0cd0c6e --- /dev/null +++
b/test/e2e2/test-env/control-plane/argocd-secret.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Secret +metadata: + name: argocd-secret diff --git a/test/e2e2/test-env/control-plane/kustomization.yaml b/test/e2e2/test-env/control-plane/kustomization.yaml new file mode 100644 index 0000000..88a3be5 --- /dev/null +++ b/test/e2e2/test-env/control-plane/kustomization.yaml @@ -0,0 +1,18 @@ +resources: +- https://github.com/argoproj/argo-cd/manifests/crds?ref=stable&timeout=240s +- https://github.com/argoproj/argo-cd/manifests/base/config?ref=stable&timeout=240s +- https://github.com/argoproj/argo-cd/manifests/base/dex?ref=stable&timeout=240s +- https://github.com/argoproj/argo-cd/manifests/base/redis?ref=stable&timeout=240s +- https://github.com/argoproj/argo-cd/manifests/base/repo-server?ref=stable&timeout=240s +- https://github.com/argoproj/argo-cd/manifests/base/server?ref=stable&timeout=240s +- https://github.com/argoproj/argo-cd/manifests/cluster-rbac/server?ref=stable&timeout=240s +- https://github.com/argoproj/argo-cd/examples/k8s-rbac/argocd-server-applications?ref=stable&timeout=240s +- ../common + +patches: +- path: argocd-cmd-params-cm.yaml +- path: argocd-secret.yaml +- path: server-service.yaml +- path: repo-server-service.yaml +- path: redis-service.yaml +- path: appproject-default.yaml diff --git a/test/e2e2/test-env/control-plane/redis-service.yaml b/test/e2e2/test-env/control-plane/redis-service.yaml new file mode 100644 index 0000000..1c77576 --- /dev/null +++ b/test/e2e2/test-env/control-plane/redis-service.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Service +metadata: + name: argocd-redis +spec: + type: LoadBalancer + loadBalancerIP: 192.168.56.221 diff --git a/test/e2e2/test-env/control-plane/repo-server-service.yaml b/test/e2e2/test-env/control-plane/repo-server-service.yaml new file mode 100644 index 0000000..263db14 --- /dev/null +++ b/test/e2e2/test-env/control-plane/repo-server-service.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Service +metadata: + name: argocd-repo-server +spec: + type: LoadBalancer + loadBalancerIP: 192.168.56.222 diff --git a/test/e2e2/test-env/control-plane/server-service.yaml b/test/e2e2/test-env/control-plane/server-service.yaml new file mode 100644 index 0000000..f30c5c9 --- /dev/null +++ b/test/e2e2/test-env/control-plane/server-service.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Service +metadata: + name: argocd-server +spec: + type: LoadBalancer + loadBalancerIP: 192.168.56.220 diff --git a/test/e2e2/test-env/gen-creds.sh b/test/e2e2/test-env/gen-creds.sh new file mode 100755 index 0000000..3dff924 --- /dev/null +++ b/test/e2e2/test-env/gen-creds.sh @@ -0,0 +1,45 @@ +#!/bin/bash +# Copyright 2024 The argocd-agent Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +############################################################################## +# Script to generate credentials for e2e-tests of argocd-agent. +# +# WARNING: Development script. Do not use to produce production credentials. +# This script comes without any promises. 
It should only be used to generate +# credentials for your dev or demo environments. The passwords produced are +# weak. +############################################################################## + +set -eo pipefail +if ! pwmake=$(which pwmake); then + pwmake=$(which pwgen) +fi + +SCRIPTPATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +htpasswd=$(which htpasswd) +creds_path=${SCRIPTPATH}/creds +test -d ${creds_path} || mkdir ${creds_path} + +if test -f "${creds_path}/users.control-plane"; then + echo "Truncating existing creds" + rm -f "${creds_path}/users.control-plane" +fi +touch "${creds_path}/users.control-plane" + +for ag in agent-managed agent-autonomous; do + password=$($pwmake 56) + $htpasswd -b -B "${creds_path}/users.control-plane" "${ag}" "${password}" + echo "${ag}:${password}" > "${creds_path}/creds.${ag}" +done diff --git a/test/e2e2/test-env/resources/metallb-ipaddresspool.yaml b/test/e2e2/test-env/resources/metallb-ipaddresspool.yaml new file mode 100644 index 0000000..fe087f2 --- /dev/null +++ b/test/e2e2/test-env/resources/metallb-ipaddresspool.yaml @@ -0,0 +1,9 @@ +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + name: default-addresspool + namespace: metallb-system +spec: + addresses: + - 192.168.56.100-192.168.56.254 + autoAssign: true diff --git a/test/e2e2/test-env/resources/scc-anyuid-seccomp-netbind.yaml b/test/e2e2/test-env/resources/scc-anyuid-seccomp-netbind.yaml new file mode 100644 index 0000000..64f466a --- /dev/null +++ b/test/e2e2/test-env/resources/scc-anyuid-seccomp-netbind.yaml @@ -0,0 +1,53 @@ +# This is the default any-uid OpenShift SCC, but with: +# - .allowedCapabilities[0] = "NET_BIND_SERVICE" +# - .seccompProfiles[0] = "runtime/default" + +kind: SecurityContextConstraints +apiVersion: security.openshift.io/v1 + +metadata: + annotations: + kubernetes.io/description: >- + anyuid provides all features of the restricted SCC but allows users to run + with any UID and any GID. 
+ name: anyuid-seccomp-netbind +seccompProfiles: + - runtime/default +allowedCapabilities: + - NET_BIND_SERVICE +fsGroup: + type: RunAsAny +groups: + - 'system:cluster-admins' +priority: 10 +requiredDropCapabilities: + - MKNOD +runAsUser: + type: RunAsAny +seLinuxContext: + type: MustRunAs +supplementalGroups: + type: RunAsAny +users: [] +volumes: + - configMap + - csi + - downwardAPI + - emptyDir + - ephemeral + - persistentVolumeClaim + - projected + - secret + +defaultAddCapabilities: null + +readOnlyRootFilesystem: false + +allowHostDirVolumePlugin: false +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false +allowPrivilegeEscalation: true +allowPrivilegedContainer: false diff --git a/test/e2e2/test-env/resources/vcluster.yaml b/test/e2e2/test-env/resources/vcluster.yaml new file mode 100644 index 0000000..a69eab8 --- /dev/null +++ b/test/e2e2/test-env/resources/vcluster.yaml @@ -0,0 +1,15 @@ +controlPlane: + statefulSet: + security: + podSecurityContext: + fsGroup: 12345 + containerSecurityContext: + runAsUser: 12345 + runAsNonRoot: true + +rbac: + role: + extraRules: + - apiGroups: [""] + resources: ["endpoints/restricted"] + verbs: ["create"] \ No newline at end of file diff --git a/test/e2e2/test-env/setup-vcluster-env.sh b/test/e2e2/test-env/setup-vcluster-env.sh new file mode 100755 index 0000000..3ec18d0 --- /dev/null +++ b/test/e2e2/test-env/setup-vcluster-env.sh @@ -0,0 +1,255 @@ +#!/bin/bash +# Copyright 2024 The argocd-agent Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e +set -o pipefail + +# enable for debugging: +# set -x + +SCRIPTPATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +VCLUSTERS="control-plane:argocd agent-managed:argocd agent-autonomous:argocd" +VCLUSTERS_AGENTS="agent-managed:argocd agent-autonomous:argocd" +action="$1" + +# Kubectl context to restore +initial_context=$(kubectl config current-context) + +cleanup() { + kubectl config use-context ${initial_context} +} + +on_error() { + echo "ERROR: Error occurred, terminating." >&2 + cleanup +} + +cluster() { + IFS=":" s=($1); echo ${s[0]} +} + +namespace() { + IFS=":" s=($1); echo ${s[1]} +} + +trap cleanup EXIT +trap on_error ERR + +# check_for_openshift looks for OpenShift API resources on the cluster and, if found, sets OPENSHIFT=true +check_for_openshift() { + + OPENSHIFT= + if (kubectl api-resources || true) | grep -q "openshift.io"; then + OPENSHIFT=true + fi + +} + + +# wait_for_pods looks at all Pods running in k8s context $1, and keeps waiting until the running count == $2. +wait_for_pods() { + + set +e + + count=0 + while [ true ] + do + + echo " -> Waiting for $1 pods to be running. Expecting $2 running pods." + + kubectl get pods --context="$1" -A + RUNNING_PODS=`kubectl get pods --context="$1" -A | grep "Running" | wc -l | tr -d '[:space:]'` + + if [[ "$RUNNING_PODS" == "$2" ]]; then + break + fi + + count=$((count+1)) + if [[ $count -eq 60 ]]; then + echo " -> Timed out waiting for pods to be running."
+ echo " -> Pods:" + kubectl describe pods --context="$1" -n argocd + echo " -> Deployments:" + kubectl describe deployments --context="$1" -n argocd + echo " -> Stateful Sets:" + kubectl describe statefulsets --context="$1" -n argocd + echo " -> Exiting due to timeout waiting for pods to be running." + exit 1 + fi + + sleep 10 + done + + echo " -> Done waiting for $1 pods." + + set -e +} + + +apply() { + + TMP_DIR=`mktemp -d` + + echo "-> TMP_DIR is $TMP_DIR" + cp -r ${SCRIPTPATH}/* $TMP_DIR + + # Comment out 'loadBalancerIP:' lines on OpenShift + if [[ "$OPENSHIFT" != "" ]]; then + sed -i.bak -e '/loadBalancerIP/s/^/#/' $TMP_DIR/control-plane/redis-service.yaml + sed -i.bak -e '/loadBalancerIP/s/^/#/' $TMP_DIR/control-plane/repo-server-service.yaml + sed -i.bak -e '/loadBalancerIP/s/^/#/' $TMP_DIR/control-plane/server-service.yaml + fi + + echo "-> Create Argo CD on control plane" + + cluster=control-plane + namespace=argocd + echo " --> Creating instance in vcluster $cluster" + kubectl --context vcluster-$cluster create ns $namespace || true + + # Run 'kubectl apply' twice, to avoid the following error that occurs during the first invocation: + # - 'error: resource mapping not found for name: "default" namespace: "" from "(...)": no matches for kind "AppProject" in version "argoproj.io/v1alpha1"' + kubectl --context vcluster-$cluster apply -n $namespace -k ${TMP_DIR}/${cluster} || true + kubectl --context vcluster-$cluster apply -n $namespace -k ${TMP_DIR}/${cluster} + + if [[ "$OPENSHIFT" != "" ]]; then + + echo "-> Waiting for Redis load balancer on control plane Argo CD" + + while [ true ] + do + REDIS_ADDR=`kubectl --context vcluster-control-plane -n argocd get service/argocd-redis -o json | jq -r '.status.loadBalancer.ingress[0].hostname'` + + if [[ "$REDIS_ADDR" != "" ]] && [[ "$REDIS_ADDR" != "null" ]]; then + break + fi + + sleep 2 + done + + echo "-> Waiting for repo-server load balancer on control plane Argo CD" + + while [ true ] + do + REPO_SERVER_ADDR=`kubectl --context vcluster-control-plane -n argocd get service/argocd-repo-server -o json | jq -r '.status.loadBalancer.ingress[0].hostname'` + + if [[ "$REPO_SERVER_ADDR" != "" ]] && [[ "$REPO_SERVER_ADDR" != "null" ]]; then + break + fi + sleep 2 + + done + + else + # For all other cases, use hardcoded values + REPO_SERVER_ADDR="192.168.56.222" + REDIS_ADDR="192.168.56.221" + fi + + echo "Redis on control plane: $REDIS_ADDR" + echo "Repo server URL on control plane: $REPO_SERVER_ADDR" + + # Update the Argo CD repo-server/redis addresses that agent-managed Argo CD instance connects to + sed -i.bak "s/repo-server-address/$REPO_SERVER_ADDR/g" "$TMP_DIR/agent-managed/argocd-cmd-params-cm.yaml" + sed -i.bak "s/redis-server-address/$REDIS_ADDR/g" "$TMP_DIR/agent-managed/argocd-cmd-params-cm.yaml" + + echo "-> Creating Argo CD instances in vclusters" + for c in $VCLUSTERS_AGENTS; do + cluster=$(cluster $c) + namespace=$(namespace $c) + echo " --> Creating instance in vcluster $cluster" + kubectl --context vcluster-$cluster create ns $namespace || true + + # Run 'kubectl apply' twice, to avoid error that occurs during the first invocation (see above for error) + kubectl --context vcluster-$cluster apply -n $namespace -k ${TMP_DIR}/${cluster} || true + kubectl --context vcluster-$cluster apply -n $namespace -k ${TMP_DIR}/${cluster} + done + + kubectl --context vcluster-control-plane create ns agent-autonomous || true + kubectl --context vcluster-control-plane create ns agent-managed || true + kubectl --context vcluster-agent-managed 
create ns agent-managed || true + + echo "-> Waiting for all the Argo CD/vCluster pods to be running on vclusters" + wait_for_pods vcluster-control-plane 5 + wait_for_pods vcluster-agent-autonomous 4 + wait_for_pods vcluster-agent-managed 4 + +} + +check_for_openshift + + +case "$action" in +create) + + + kubectl create ns vcluster-agent-managed --context=${initial_context} || true + kubectl create ns vcluster-control-plane --context=${initial_context} || true + kubectl create ns vcluster-agent-autonomous --context=${initial_context} || true + + EXTRA_VCLUSTER_PARAMS="" + + if [[ "$OPENSHIFT" != "" ]]; then + + # Ensure that the namespaces we are using for our vclusters use our custom SCC (see SCC yaml for details) + kubectl apply -f ${SCRIPTPATH}/resources/scc-anyuid-seccomp-netbind.yaml + + oc adm policy add-scc-to-group anyuid-seccomp-netbind system:serviceaccounts:vcluster-agent-managed --context=${initial_context} + + oc adm policy add-scc-to-group anyuid-seccomp-netbind system:serviceaccounts:vcluster-control-plane --context=${initial_context} + + oc adm policy add-scc-to-group anyuid-seccomp-netbind system:serviceaccounts:vcluster-agent-autonomous --context=${initial_context} + + EXTRA_VCLUSTER_PARAMS="-f ${SCRIPTPATH}/resources/vcluster.yaml" + fi + + echo "-> Creating required vclusters" + for c in $VCLUSTERS; do + cluster=$(cluster $c) + echo " --> Creating vcluster $cluster" + vcluster create --context=${initial_context} ${EXTRA_VCLUSTER_PARAMS} -n vcluster-${cluster} --expose --kube-config-context-name vcluster-${cluster} vcluster-${cluster} + kubectl config use-context ${initial_context} + + # A sleep statement here was found to be beneficial, to allow time for the load balancer to become available. If it turns out not to be required, these commented-out lines should be removed. + # if [[ "$OPENSHIFT" != "" ]]; then + # sleep 60 + # fi + done + sleep 2 + apply + ;; +apply) + apply + ;; +delete) + echo "-> Deleting vclusters" + for c in $VCLUSTERS; do + cluster=$(cluster $c) + echo " --> Deleting vcluster $cluster" + vcluster delete --context=${initial_context} vcluster-${cluster} || true + done + kubectl delete --context=${initial_context} ns vcluster-control-plane || true + kubectl delete --context=${initial_context} ns vcluster-agent-managed || true + kubectl delete --context=${initial_context} ns vcluster-agent-autonomous || true + + kubectl config delete-context vcluster-control-plane || true + kubectl config delete-context vcluster-agent-managed || true + kubectl config delete-context vcluster-agent-autonomous || true + + ;; +*) + echo "$0 (create|apply|delete)" >&2 + exit 1 +esac diff --git a/test/e2e2/test-env/start-agent-autonomous.sh b/test/e2e2/test-env/start-agent-autonomous.sh new file mode 100755 index 0000000..faa01f9 --- /dev/null +++ b/test/e2e2/test-env/start-agent-autonomous.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# Copyright 2024 The argocd-agent Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e -o pipefail +ARGS=$* +if !
kubectl config get-contexts | tail -n +2 | awk '{ print $2 }' | grep -qE '^vcluster-agent-autonomous$'; then + echo "kube context vcluster-agent-autonomous is not configured; missing setup?" >&2 + exit 1 +fi +SCRIPTPATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +test -f cmd/agent/main.go || (echo "Script should be run from argocd-agent's root path" >&2; exit 1) +go run ./cmd/agent/main.go --agent-mode autonomous --creds userpass:${SCRIPTPATH}/creds/creds.agent-autonomous --server-address 127.0.0.1 --server-port 8443 --insecure-tls --kubecontext vcluster-agent-autonomous --namespace argocd $ARGS diff --git a/test/e2e2/test-env/start-agent-managed.sh b/test/e2e2/test-env/start-agent-managed.sh new file mode 100755 index 0000000..a710391 --- /dev/null +++ b/test/e2e2/test-env/start-agent-managed.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# Copyright 2024 The argocd-agent Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -ex -o pipefail +ARGS=$* +if ! kubectl config get-contexts | tail -n +2 | awk '{ print $2 }' | grep -qE '^vcluster-agent-managed$'; then + echo "kube context vcluster-agent-managed is not configured; missing setup?" >&2 + exit 1 +fi +SCRIPTPATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +test -f cmd/agent/main.go || (echo "Script should be run from argocd-agent's root path" >&2; exit 1) +go run ./cmd/agent/main.go --agent-mode managed --creds userpass:${SCRIPTPATH}/creds/creds.agent-managed --server-address 127.0.0.1 --server-port 8443 --insecure-tls --kubecontext vcluster-agent-managed --namespace agent-managed $ARGS diff --git a/test/e2e2/test-env/start-principal.sh b/test/e2e2/test-env/start-principal.sh new file mode 100755 index 0000000..8b0570b --- /dev/null +++ b/test/e2e2/test-env/start-principal.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# Copyright 2024 The argocd-agent Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -ex -o pipefail +ARGS=$* +if ! kubectl config get-contexts | tail -n +2 | awk '{ print $2 }' | grep -qE '^vcluster-control-plane$'; then + echo "kube context vcluster-control-plane is not configured; missing setup?"
>&2 + exit 1 +fi +SCRIPTPATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +test -f cmd/principal/main.go || (echo "Script should be run from argocd-agent's root path" >&2; exit 1) +go run ./cmd/principal --allowed-namespaces '*' --insecure-tls-generate --insecure-jwt-generate --kubecontext vcluster-control-plane --log-level trace --passwd ${SCRIPTPATH}/creds/users.control-plane $ARGS From 3b2f0ce09e6eec1504060f83f06050e8118e2b94 Mon Sep 17 00:00:00 2001 From: John Pitman Date: Wed, 30 Oct 2024 14:10:41 -0400 Subject: [PATCH 2/4] remove CI action for this branch Signed-off-by: John Pitman --- .github/workflows/ci.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 1053335..512e21d 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -3,7 +3,6 @@ on: push: branches: - 'main' - - 'gitops-5159-e2e-test-framework' pull_request: branches: - 'main' From b5afc9bff6cd81636cff9648728239a74810b1b2 Mon Sep 17 00:00:00 2001 From: John Pitman Date: Tue, 5 Nov 2024 13:49:05 -0500 Subject: [PATCH 3/4] updates based on PR feedback Signed-off-by: John Pitman --- .github/workflows/ci.yaml | 2 +- Makefile | 2 +- test/e2e2/README.md | 2 +- test/e2e2/basic_test.go | 73 ++++++++++++++++++-- test/e2e2/fixture/fixture.go | 4 +- test/e2e2/fixture/kubeclient.go | 50 ++++++++++++++ test/e2e2/fixture/kubeconfig.go | 15 ---- test/e2e2/fixture_test.go | 4 ++ test/e2e2/sync_test.go | 66 +++++++++++++++++- test/e2e2/test-env/start-agent-autonomous.sh | 5 +- test/e2e2/test-env/start-agent-managed.sh | 3 +- test/e2e2/test-env/start-principal.sh | 3 +- 12 files changed, 195 insertions(+), 34 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 512e21d..2c6e2f2 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -195,7 +195,7 @@ jobs: make setup-e2e2 - name: Run the principal and agents run: | - make start-argocd-agent 2>&1 | sed -r "s/[[:cntrl:]]\[[0-9]{1,3}m//g" > /tmp/e2e-argocd-agent.log & + make start-e2e2 2>&1 | sed -r "s/[[:cntrl:]]\[[0-9]{1,3}m//g" > /tmp/e2e-argocd-agent.log & sleep 10 - name: Run the e2e tests run: | diff --git a/Makefile b/Makefile index d6c20fc..3306480 100644 --- a/Makefile +++ b/Makefile @@ -29,7 +29,7 @@ setup-e2e2: test/e2e2/test-env/setup-vcluster-env.sh create .PHONY: start-argocd-agent -start-argocd-agent: +start-e2e2: test/e2e2/test-env/gen-creds.sh goreman -f test/e2e2/test-env/Procfile start diff --git a/test/e2e2/README.md b/test/e2e2/README.md index 1ea7da2..036a8c1 100644 --- a/test/e2e2/README.md +++ b/test/e2e2/README.md @@ -27,7 +27,7 @@ make setup-e2e2 To run the principal and agents, execute the following command from the repository root: ```shell -make start-argocd-agent +make start-e2e2 ``` To run the tests, execute the following command from the repository root in a separate terminal instance: diff --git a/test/e2e2/basic_test.go b/test/e2e2/basic_test.go index 5235e34..f4bda8e 100644 --- a/test/e2e2/basic_test.go +++ b/test/e2e2/basic_test.go @@ -30,7 +30,7 @@ type BasicTestSuite struct { fixture.BaseSuite } -func (suite *BasicTestSuite) Test_Agent_Managed() { +func (suite *BasicTestSuite) Test_AgentManaged() { requires := suite.Require() // Create a managed application in the principal's cluster @@ -69,6 +69,37 @@ func (suite *BasicTestSuite) Test_Agent_Managed() { return err == nil }, 30*time.Second, 1*time.Second) + // Check that the .spec field of the managed-agent matches that of the + // principal + app = 
argoapp.Application{} + err = suite.PrincipalClient.Get(suite.Ctx, key, &app, metav1.GetOptions{}) + requires.NoError(err) + mapp := argoapp.Application{} + err = suite.ManagedAgentClient.Get(suite.Ctx, key, &mapp, metav1.GetOptions{}) + requires.NoError(err) + requires.Equal(&app.Spec, &mapp.Spec) + + // Modify the application on the principal and ensure the change is + // propagated to the managed-agent + err = suite.PrincipalClient.EnsureApplicationUpdate(suite.Ctx, key, func(app *argoapp.Application) error { + app.Spec.Info = []argoapp.Info{ + { + Name: "e2e", + Value: "test", + }, + } + return nil + }, metav1.UpdateOptions{}) + requires.NoError(err) + requires.Eventually(func() bool { + app := argoapp.Application{} + err := suite.ManagedAgentClient.Get(suite.Ctx, key, &app, metav1.GetOptions{}) + return err == nil && + len(app.Spec.Info) == 1 && + app.Spec.Info[0].Name == "e2e" && + app.Spec.Info[0].Value == "test" + }, 30*time.Second, 1*time.Second) + // Delete the app from the principal err = suite.PrincipalClient.Delete(suite.Ctx, &app, metav1.DeleteOptions{}) requires.NoError(err) @@ -81,7 +112,7 @@ func (suite *BasicTestSuite) Test_Agent_Managed() { }, 30*time.Second, 1*time.Second) } -func (suite *BasicTestSuite) Test_Agent_Autonomous() { +func (suite *BasicTestSuite) Test_AgentAutonomous() { requires := suite.Require() // Create an autonomous application on the autonomous-agent's cluster @@ -114,15 +145,47 @@ func (suite *BasicTestSuite) Test_Agent_Autonomous() { err := suite.AutonomousAgentClient.Create(suite.Ctx, &app, metav1.CreateOptions{}) requires.NoError(err) - key := types.NamespacedName{Name: app.Name, Namespace: "agent-autonomous"} + principalKey := types.NamespacedName{Name: app.Name, Namespace: "agent-autonomous"} + agentKey := fixture.ToNamespacedName(&app) // Ensure the app has been pushed to the principal requires.Eventually(func() bool { app := argoapp.Application{} - err := suite.PrincipalClient.Get(suite.Ctx, key, &app, metav1.GetOptions{}) + err := suite.PrincipalClient.Get(suite.Ctx, principalKey, &app, metav1.GetOptions{}) return err == nil }, 30*time.Second, 1*time.Second) + // Check that the .spec field of the principal matches that of the + // autonomous-agent + app = argoapp.Application{} + err = suite.AutonomousAgentClient.Get(suite.Ctx, agentKey, &app, metav1.GetOptions{}) + requires.NoError(err) + papp := argoapp.Application{} + err = suite.PrincipalClient.Get(suite.Ctx, principalKey, &papp, metav1.GetOptions{}) + requires.NoError(err) + requires.Equal(&app.Spec, &papp.Spec) + + // Modify the application on the autonomous-agent and ensure the change is + // propagated to the principal + err = suite.AutonomousAgentClient.EnsureApplicationUpdate(suite.Ctx, agentKey, func(app *argoapp.Application) error { + app.Spec.Info = []argoapp.Info{ + { + Name: "e2e", + Value: "test", + }, + } + return nil + }, metav1.UpdateOptions{}) + requires.NoError(err) + requires.Eventually(func() bool { + app := argoapp.Application{} + err := suite.PrincipalClient.Get(suite.Ctx, principalKey, &app, metav1.GetOptions{}) + return err == nil && + len(app.Spec.Info) == 1 && + app.Spec.Info[0].Name == "e2e" && + app.Spec.Info[0].Value == "test" + }, 30*time.Second, 1*time.Second) + // Delete the app from the autonomous-agent err = suite.AutonomousAgentClient.Delete(suite.Ctx, &app, metav1.DeleteOptions{}) requires.NoError(err) @@ -130,7 +193,7 @@ func (suite *BasicTestSuite) Test_Agent_Autonomous() { // Ensure the app has been deleted from the principal 
requires.Eventually(func() bool { app := argoapp.Application{} - err := suite.PrincipalClient.Get(suite.Ctx, key, &app, metav1.GetOptions{}) + err := suite.PrincipalClient.Get(suite.Ctx, principalKey, &app, metav1.GetOptions{}) return errors.IsNotFound(err) }, 30*time.Second, 1*time.Second) } diff --git a/test/e2e2/fixture/fixture.go b/test/e2e2/fixture/fixture.go index a7c005a..641180f 100644 --- a/test/e2e2/fixture/fixture.go +++ b/test/e2e2/fixture/fixture.go @@ -57,14 +57,14 @@ func (suite *BaseSuite) SetupSuite() { func (suite *BaseSuite) SetupTest() { err := CleanUp(suite.Ctx, suite.PrincipalClient, suite.ManagedAgentClient, suite.AutonomousAgentClient) - suite.Assert().Nil(err) + suite.Require().Nil(err) suite.T().Logf("Test begun at: %v", time.Now()) } func (suite *BaseSuite) TearDownTest() { suite.T().Logf("Test ended at: %v", time.Now()) err := CleanUp(suite.Ctx, suite.PrincipalClient, suite.ManagedAgentClient, suite.AutonomousAgentClient) - suite.Assert().Nil(err) + suite.Require().Nil(err) } func ensureDeletion(ctx context.Context, kclient KubeClient, app argoapp.Application) error { diff --git a/test/e2e2/fixture/kubeclient.go b/test/e2e2/fixture/kubeclient.go index 0b0c5da..2632d3e 100644 --- a/test/e2e2/fixture/kubeclient.go +++ b/test/e2e2/fixture/kubeclient.go @@ -12,6 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package fixture provides a client interface similar to the one provided by +// the controller-runtime package, in order to avoid creating a dependency on +// the controller-runtime package. package fixture import ( @@ -25,6 +28,7 @@ import ( apps "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -37,11 +41,19 @@ import ( "k8s.io/client-go/restmapper" ) +// KubeObject represents a Kubernetes object. This allows the client interface +// to work seamlessly with any resource that implements both the metav1.Object +// and runtime.Object interfaces. This is similar to the controller-runtime's +// client.Object interface. type KubeObject interface { metav1.Object runtime.Object } +// KubeObjectList represents a Kubernetes object list. This allows the client +// interface to work seamlessly with any resource that implements both the +// metav1.ListInterface and runtime.Object interfaces. This is similar to the +// controller-runtime's client.ObjectList interface. type KubeObjectList interface { metav1.ListInterface runtime.Object @@ -108,6 +120,9 @@ func NewKubeClient(config *rest.Config) (KubeClient, error) { }, nil } +// Get returns the object with the specified key from the cluster. object must +// be a struct pointer so it can be updated with the result returned by the +// server. func (c KubeClient) Get(ctx context.Context, key types.NamespacedName, object KubeObject, options metav1.GetOptions) error { resource, err := c.resourceFor(object) if err != nil { @@ -131,6 +146,10 @@ func (c KubeClient) Get(ctx context.Context, key types.NamespacedName, object Ku return err } +// List returns a list of objects matching the criteria specified by the given +// list options from the given namespace. list must be a struct pointer so that +// the Items field in the list can be populated with the results returned by the +// server. 
func (c KubeClient) List(ctx context.Context, namespace string, list KubeObjectList, options metav1.ListOptions) error { resource, err := c.resourceFor(list) if err != nil { @@ -150,6 +169,8 @@ func (c KubeClient) List(ctx context.Context, namespace string, list KubeObjectL return err } +// Create creates the given object in the cluster. object must be a struct +// pointer so that it can be updated with the result returned by the server. func (c KubeClient) Create(ctx context.Context, object KubeObject, options metav1.CreateOptions) error { resource, err := c.resourceFor(object) if err != nil { @@ -183,6 +204,8 @@ func (c KubeClient) Create(ctx context.Context, object KubeObject, options metav return err } +// Update updates the given object in the cluster. object must be a struct +// pointer so that it can be updated with the result returned by the server. func (c KubeClient) Update(ctx context.Context, object KubeObject, options metav1.UpdateOptions) error { resource, err := c.resourceFor(object) if err != nil { @@ -206,6 +229,9 @@ func (c KubeClient) Update(ctx context.Context, object KubeObject, options metav return err } +// Patch patches the given object in the cluster using the JSONPatch patch type. +// object must be a struct pointer so that it can be updated with the result +// returned by the server. func (c KubeClient) Patch(ctx context.Context, object KubeObject, jsonPatch []interface{}, options metav1.PatchOptions) error { resource, err := c.resourceFor(object) if err != nil { @@ -230,6 +256,7 @@ func (c KubeClient) Patch(ctx context.Context, object KubeObject, jsonPatch []in return err } +// Delete deletes the given object from the server. func (c KubeClient) Delete(ctx context.Context, object KubeObject, options metav1.DeleteOptions) error { resource, err := c.resourceFor(object) if err != nil { @@ -269,3 +296,26 @@ func (c KubeClient) resourceFor(object runtime.Object) (schema.GroupVersionResou } return resource, nil } + +// EnsureApplicationUpdate ensures the argocd application with the given key is +// updated by retrying if there is a conflicting change. 
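+//
+// A minimal, hypothetical usage sketch (the variable names `client` and `key`
+// below are assumed for illustration only and are not taken from this patch):
+//
+//	err := client.EnsureApplicationUpdate(ctx, key, func(app *argoapp.Application) error {
+//		// mutate the freshly fetched Application; the update is retried on conflict
+//		app.Spec.Project = "default"
+//		return nil
+//	}, metav1.UpdateOptions{})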
+func (c KubeClient) EnsureApplicationUpdate(ctx context.Context, key types.NamespacedName, modify func(*argoapp.Application) error, options metav1.UpdateOptions) error { + var err error + for { + var app argoapp.Application + err = c.Get(ctx, key, &app, metav1.GetOptions{}) + if err != nil { + return err + } + + err = modify(&app) + if err != nil { + return err + } + + err = c.Update(ctx, &app, options) + if !errors.IsConflict(err) { + return err + } + } +} diff --git a/test/e2e2/fixture/kubeconfig.go b/test/e2e2/fixture/kubeconfig.go index 6ec4386..60c9de8 100644 --- a/test/e2e2/fixture/kubeconfig.go +++ b/test/e2e2/fixture/kubeconfig.go @@ -39,20 +39,5 @@ func GetSystemKubeConfig(kcontext string) (*rest.Config, error) { return nil, err } - err = setRateLimitOnRestConfig(restConfig) - if err != nil { - return nil, err - } - return restConfig, nil } - -// setRateLimitOnRestConfig sets the QPS and Burst for the rest config -func setRateLimitOnRestConfig(restConfig *rest.Config) error { - if restConfig != nil { - // Prevent rate limiting of our requests - restConfig.QPS = 100 - restConfig.Burst = 250 - } - return nil -} diff --git a/test/e2e2/fixture_test.go b/test/e2e2/fixture_test.go index 54bb827..d41b7b1 100644 --- a/test/e2e2/fixture_test.go +++ b/test/e2e2/fixture_test.go @@ -34,6 +34,10 @@ import ( "k8s.io/client-go/restmapper" ) +// FixtureTestSuite is code used to experiment with and test the e2e fixture code +// itself. It doesn't test any of the project's components. In order to run, it +// requires the Argo CD CRDs (e.g. Application) to be installed on the +// target cluster. It is currently commented out. type FixtureTestSuite struct { suite.Suite } diff --git a/test/e2e2/sync_test.go b/test/e2e2/sync_test.go index a8df89b..57de806 100644 --- a/test/e2e2/sync_test.go +++ b/test/e2e2/sync_test.go @@ -88,7 +88,7 @@ func (suite *SyncTestSuite) TearDownTest() { } -func (suite *SyncTestSuite) Test_Sync_Managed() { +func (suite *SyncTestSuite) Test_SyncManaged() { requires := suite.Require() // Create a managed application in the principal's cluster @@ -152,6 +152,37 @@ func (suite *SyncTestSuite) Test_Sync_Managed() { return err == nil && app.Status.Sync.Status == argoapp.SyncStatusCodeSynced }, 60*time.Second, 1*time.Second) + // Check that the .spec field of the managed-agent matches that of the + // principal + app = argoapp.Application{} + err = suite.PrincipalClient.Get(suite.Ctx, key, &app, metav1.GetOptions{}) + requires.NoError(err) + mapp := argoapp.Application{} + err = suite.ManagedAgentClient.Get(suite.Ctx, key, &mapp, metav1.GetOptions{}) + requires.NoError(err) + requires.Equal(&app.Spec, &mapp.Spec) + + // Modify the application on the principal and ensure the change is + // propagated to the managed-agent + err = suite.PrincipalClient.EnsureApplicationUpdate(suite.Ctx, key, func(app *argoapp.Application) error { + app.Spec.Info = []argoapp.Info{ + { + Name: "e2e", + Value: "test", + }, + } + return nil + }, metav1.UpdateOptions{}) + requires.NoError(err) + requires.Eventually(func() bool { + app := argoapp.Application{} + err := suite.ManagedAgentClient.Get(suite.Ctx, key, &app, metav1.GetOptions{}) + return err == nil && + len(app.Spec.Info) == 1 && + app.Spec.Info[0].Name == "e2e" && + app.Spec.Info[0].Value == "test" + }, 30*time.Second, 1*time.Second) + + // Delete the app from the principal err = suite.PrincipalClient.Delete(suite.Ctx, &app, metav1.DeleteOptions{}) requires.NoError(err) @@ -164,7 +195,7 @@ func (suite *SyncTestSuite) Test_Sync_Managed() { },
90*time.Second, 1*time.Second) } -func (suite *SyncTestSuite) Test_Sync_Autonomous() { +func (suite *SyncTestSuite) Test_SyncAutonomous() { requires := suite.Require() // Create an autonomous application on the autonomous-agent's cluster @@ -232,6 +263,37 @@ func (suite *SyncTestSuite) Test_Sync_Autonomous() { return err == nil && app.Status.Sync.Status == argoapp.SyncStatusCodeSynced }, 60*time.Second, 1*time.Second) + // Check that the .spec field of the principal matches that of the + // autonomous-agent + app = argoapp.Application{} + err = suite.AutonomousAgentClient.Get(suite.Ctx, agentKey, &app, metav1.GetOptions{}) + requires.NoError(err) + papp := argoapp.Application{} + err = suite.PrincipalClient.Get(suite.Ctx, principalKey, &papp, metav1.GetOptions{}) + requires.NoError(err) + requires.Equal(&app.Spec, &papp.Spec) + + // Modify the application on the autonomous-agent and ensure the change is + // propagated to the principal + err = suite.AutonomousAgentClient.EnsureApplicationUpdate(suite.Ctx, agentKey, func(app *argoapp.Application) error { + app.Spec.Info = []argoapp.Info{ + { + Name: "e2e", + Value: "test", + }, + } + return nil + }, metav1.UpdateOptions{}) + requires.NoError(err) + requires.Eventually(func() bool { + app := argoapp.Application{} + err := suite.PrincipalClient.Get(suite.Ctx, principalKey, &app, metav1.GetOptions{}) + return err == nil && + len(app.Spec.Info) == 1 && + app.Spec.Info[0].Name == "e2e" && + app.Spec.Info[0].Value == "test" + }, 30*time.Second, 1*time.Second) + // Delete the app from the autonomous-agent err = suite.AutonomousAgentClient.Delete(suite.Ctx, &app, metav1.DeleteOptions{}) requires.NoError(err) diff --git a/test/e2e2/test-env/start-agent-autonomous.sh b/test/e2e2/test-env/start-agent-autonomous.sh index faa01f9..c7f2aa0 100755 --- a/test/e2e2/test-env/start-agent-autonomous.sh +++ b/test/e2e2/test-env/start-agent-autonomous.sh @@ -13,12 +13,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -set -e -o pipefail +set -ex -o pipefail ARGS=$* if ! kubectl config get-contexts | tail -n +2 | awk '{ print $2 }' | grep -qE '^vcluster-agent-autonomous$'; then echo "kube context vcluster-agent-autonomous is not configured; missing setup?" >&2 exit 1 fi SCRIPTPATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" -test -f cmd/agent/main.go || (echo "Script should be run from argocd-agent's root path" >&2; exit 1) -go run ./cmd/agent/main.go --agent-mode autonomous --creds userpass:${SCRIPTPATH}/creds/creds.agent-autonomous --server-address 127.0.0.1 --server-port 8443 --insecure-tls --kubecontext vcluster-agent-autonomous --namespace argocd $ARGS +go run github.com/argoproj-labs/argocd-agent/cmd/agent --agent-mode autonomous --creds userpass:${SCRIPTPATH}/creds/creds.agent-autonomous --server-address 127.0.0.1 --server-port 8443 --insecure-tls --kubecontext vcluster-agent-autonomous --namespace argocd --log-level trace $ARGS diff --git a/test/e2e2/test-env/start-agent-managed.sh b/test/e2e2/test-env/start-agent-managed.sh index a710391..4f49491 100755 --- a/test/e2e2/test-env/start-agent-managed.sh +++ b/test/e2e2/test-env/start-agent-managed.sh @@ -20,5 +20,4 @@ if ! 
kubectl config get-contexts | tail -n +2 | awk '{ print $2 }' | grep -qE '^ exit 1 fi SCRIPTPATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" -test -f cmd/agent/main.go || (echo "Script should be run from argocd-agent's root path" >&2; exit 1) -go run ./cmd/agent/main.go --agent-mode managed --creds userpass:${SCRIPTPATH}/creds/creds.agent-managed --server-address 127.0.0.1 --server-port 8443 --insecure-tls --kubecontext vcluster-agent-managed --namespace agent-managed $ARGS +go run github.com/argoproj-labs/argocd-agent/cmd/agent --agent-mode managed --creds userpass:${SCRIPTPATH}/creds/creds.agent-managed --server-address 127.0.0.1 --server-port 8443 --insecure-tls --kubecontext vcluster-agent-managed --namespace agent-managed --log-level trace $ARGS diff --git a/test/e2e2/test-env/start-principal.sh b/test/e2e2/test-env/start-principal.sh index 8b0570b..9a0ab0d 100755 --- a/test/e2e2/test-env/start-principal.sh +++ b/test/e2e2/test-env/start-principal.sh @@ -20,5 +20,4 @@ if ! kubectl config get-contexts | tail -n +2 | awk '{ print $2 }' | grep -qE '^ exit 1 fi SCRIPTPATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" -test -f cmd/principal/main.go || (echo "Script should be run from argocd-agent's root path" >&2; exit 1) -go run ./cmd/principal --allowed-namespaces '*' --insecure-tls-generate --insecure-jwt-generate --kubecontext vcluster-control-plane --log-level trace --passwd ${SCRIPTPATH}/creds/users.control-plane $ARGS +go run github.com/argoproj-labs/argocd-agent/cmd/principal --allowed-namespaces '*' --insecure-tls-generate --insecure-jwt-generate --kubecontext vcluster-control-plane --log-level trace --passwd ${SCRIPTPATH}/creds/users.control-plane $ARGS From 5bd3dcad73d40cb4ee1d40dd901c155d77a2db32 Mon Sep 17 00:00:00 2001 From: John Pitman Date: Thu, 7 Nov 2024 14:50:49 -0500 Subject: [PATCH 4/4] generate server.secretkey for the agents Signed-off-by: John Pitman --- test/e2e2/test-env/setup-vcluster-env.sh | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/test/e2e2/test-env/setup-vcluster-env.sh b/test/e2e2/test-env/setup-vcluster-env.sh index 3ec18d0..ff175fb 100755 --- a/test/e2e2/test-env/setup-vcluster-env.sh +++ b/test/e2e2/test-env/setup-vcluster-env.sh @@ -105,13 +105,23 @@ apply() { echo "-> TMP_DIR is $TMP_DIR" cp -r ${SCRIPTPATH}/* $TMP_DIR - # Comment out 'loadBalancerIP:' lines on OpenShift + # Comment out 'loadBalancerIP:' lines on OpenShift if [[ "$OPENSHIFT" != "" ]]; then sed -i.bak -e '/loadBalancerIP/s/^/#/' $TMP_DIR/control-plane/redis-service.yaml sed -i.bak -e '/loadBalancerIP/s/^/#/' $TMP_DIR/control-plane/repo-server-service.yaml sed -i.bak -e '/loadBalancerIP/s/^/#/' $TMP_DIR/control-plane/server-service.yaml fi + # Generate the server secret key for the argocd running on the managed and autonomous agent clusters + echo "-> Generate server.secretkey for agent's argocd-secrets" + if ! pwmake=$(which pwmake); then + pwmake=$(which pwgen) + fi + echo "data:" >> $TMP_DIR/agent-managed/argocd-secret.yaml + echo " server.secretkey: $($pwmake 56 | base64)" >> $TMP_DIR/agent-managed/argocd-secret.yaml + echo "data:" >> $TMP_DIR/agent-autonomous/argocd-secret.yaml + echo " server.secretkey: $($pwmake 56 | base64)" >> $TMP_DIR/agent-autonomous/argocd-secret.yaml + echo "-> Create Argo CD on control plane" cluster=control-plane