From 77b30e221e200ce282c8e542de888fa56245948a Mon Sep 17 00:00:00 2001
From: Shubham Chaudhary
Date: Thu, 15 Dec 2022 16:42:27 +0530
Subject: [PATCH] (chore): Adding user-friendly failsteps and removing non-litmus libs (#626)

* feat(failstep): Adding failsteps in all experiments and removing non-litmus libs

Signed-off-by: Shubham Chaudhary
---
 .github/workflows/build.yml | 6 +-
 .github/workflows/push.yml | 4 +-
 .github/workflows/release.yml | 4 +-
 .github/workflows/run-e2e-on-pr-commits.yml | 6 +-
 build/Dockerfile | 2 +-
 .../litmus/aws-ssm-chaos/lib/ssm-chaos.go | 24 +-
 .../lib/ssm/aws-ssm-chaos-by-id.go | 20 +-
 .../lib/ssm/aws-ssm-chaos-by-tag.go | 18 +-
 .../azure-disk-loss/lib/azure-disk-loss.go | 78 +--
 .../lib/azure-instance-stop.go | 58 +-
 .../container-kill/helper/container-kill.go | 68 ++-
 .../container-kill/lib/container-kill.go | 48 +-
 chaoslib/litmus/disk-fill/helper/disk-fill.go | 85 +--
 chaoslib/litmus/disk-fill/lib/disk-fill.go | 52 +-
 .../lib/docker-service-kill.go | 23 +-
 .../lib/ebs-loss-by-id/lib/ebs-loss-by-id.go | 14 +-
 .../ebs-loss-by-tag/lib/ebs-loss-by-tag.go | 12 +-
 chaoslib/litmus/ebs-loss/lib/ebs-loss.go | 55 +-
 .../lib/ec2-terminate-by-id.go | 44 +-
 .../lib/ec2-terminate-by-tag.go | 56 +-
 .../lib/gcp-vm-disk-loss-by-label.go | 44 +-
 .../gcp-vm-disk-loss/lib/gcp-vm-disk-loss.go | 59 +-
 .../lib/gcp-vm-instance-stop-by-label.go | 36 +-
 .../lib/gcp-vm-instance-stop.go | 36 +-
 .../litmus/http-chaos/helper/http-helper.go | 102 ++--
 chaoslib/litmus/http-chaos/lib/http-chaos.go | 51 +-
 .../http-chaos/lib/statuscode/status-code.go | 6 +-
 .../lib/pod-delete.go | 32 +-
 .../lib/kubelet-service-kill.go | 22 +-
 chaoslib/litmus/network-chaos/helper/netem.go | 82 ++-
 .../litmus/network-chaos/lib/network-chaos.go | 100 ++--
 .../litmus/node-cpu-hog/lib/node-cpu-hog.go | 58 +-
 chaoslib/litmus/node-drain/lib/node-drain.go | 49 +-
 .../node-io-stress/lib/node-io-stress.go | 44 +-
 .../node-memory-hog/lib/node-memory-hog.go | 74 +--
 .../litmus/node-restart/lib/node-restart.go | 27 +-
 chaoslib/litmus/node-taint/lib/node-taint.go | 47 +-
 .../pod-autoscaler/lib/pod-autoscaler.go | 146 +++--
 .../pod-cpu-hog-exec/lib/pod-cpu-hog-exec.go | 121 +++--
 chaoslib/litmus/pod-delete/lib/pod-delete.go | 45 +-
 .../litmus/pod-dns-chaos/helper/dnschaos.go | 58 +-
 .../litmus/pod-dns-chaos/lib/pod-dns-chaos.go | 44 +-
 .../pod-fio-stress/lib/pod-fio-stress.go | 107 ++--
 .../lib/pod-memory-hog-exec.go | 112 ++--
 .../lib/network-policy.go | 14 +-
 .../lib/pod-network-partition.go | 58 +-
 .../lib/redfish-node-restart.go | 11 +-
 .../lib/spring-boot-chaos.go | 66 ++-
 .../stress-chaos/helper/stress-helper.go | 96 ++--
 .../litmus/stress-chaos/lib/stress-chaos.go | 54 +-
 .../litmus/vm-poweroff/lib/vm-poweroff.go | 30 +-
 .../powerfulseal/pod-delete/lib/pod-delete.go | 272 ----------
 .../container-kill/lib/container-kill.go | 362 ------------
 chaoslib/pumba/cpu-chaos/lib/cpu-chaos.go | 273 ----------
 .../pumba/memory-chaos/lib/memory-chaos.go | 274 ----------
 .../lib/corruption/corruption.go | 43 --
 .../lib/duplication/duplication.go | 43 --
 .../network-chaos/lib/latency/latency.go | 43 --
 chaoslib/pumba/network-chaos/lib/loss/loss.go | 43 --
 .../pumba/network-chaos/lib/network-chaos.go | 302 -----------
 .../pumba/pod-io-stress/lib/pod-io-stress.go | 298 ----------
 .../experiment/aws-ssm-chaos-by-id.go | 95 ++--
 .../experiment/aws-ssm-chaos-by-tag.go | 90 +--
 .../experiment/azure-disk-loss.go | 82 +--
 .../experiment/azure-instance-stop.go | 82 +--
 .../experiment/redfish-node-restart.go | 53 +-
 .../pod-delete/experiment/pod-delete.go | 50 +-
 .../experiment/gcp-vm-disk-loss-by-label.go | 38 +-
 .../experiment/gcp-vm-disk-loss.go | 41 +-
 .../gcp-vm-instance-stop-by-label.go | 38 +-
 .../experiment/gcp-vm-instance-stop.go | 40 +-
 .../experiment/container-kill.go | 53 +-
 .../generic/disk-fill/experiment/disk-fill.go | 38 +-
 .../experiment/docker-service-kill.go | 45 +-
 .../experiment/kubelet-service-kill.go | 45 +-
 .../node-cpu-hog/experiment/node-cpu-hog.go | 47 +-
 .../node-drain/experiment/node-drain.go | 47 +-
 .../experiment/node-io-stress.go | 47 +-
 .../experiment/node-memory-hog.go | 47 +-
 .../node-restart/experiment/node-restart.go | 50 +-
 .../node-taint/experiment/node-taint.go | 47 +-
 .../experiment/pod-autoscaler.go | 34 +-
 .../experiment/pod-cpu-hog-exec.go | 43 +-
 .../pod-cpu-hog/experiment/pod-cpu-hog.go | 49 +-
 .../pod-delete/experiment/pod-delete.go | 76 ++-
 .../pod-dns-error/experiment/pod-dns-error.go | 46 +-
 .../pod-dns-spoof/experiment/pod-dns-spoof.go | 46 +-
 .../experiment/pod-fio-stress.go | 43 +-
 .../experiment/pod-http-latency.go | 40 +-
 .../experiment/pod-http-modify-body.go | 41 +-
 .../experiment/pod-http-modify-header.go | 40 +-
 .../experiment/pod-http-reset-peer.go | 44 +-
 .../experiment/pod-http-status-code.go | 44 +-
 .../pod-io-stress/experiment/pod-io-stress.go | 52 +-
 .../experiment/pod-memory-hog-exec.go | 41 +-
 .../experiment/pod-memory-hog.go | 47 +-
 .../experiment/pod-network-corruption.go | 52 +-
 .../experiment/pod-network-duplication.go | 52 +-
 .../experiment/pod-network-latency.go | 52 +-
 .../experiment/pod-network-loss.go | 54 +-
 .../experiment/pod-network-partition.go | 44 +-
 .../experiment/kafka-broker-pod-failure.go | 44 +-
 .../experiment/ebs-loss-by-id.go | 77 +--
 .../experiment/ebs-loss-by-tag.go | 77 +--
 .../experiment/ec2-terminate-by-id.go | 87 +--
 .../experiment/ec2-terminate-tag.go | 87 +--
 .../experiment/spring-boot-faults.go | 41 +-
 .../vm-poweroff/experiment/vm-poweroff.go | 83 +--
 go.mod | 5 +-
 go.sum | 513 +-----------------
 .../aws-ssm-chaos/environment/environment.go | 5 +-
 pkg/aws-ssm/aws-ssm-chaos/types/types.go | 4 -
 .../disk-loss/environment/environment.go | 8 +-
 pkg/azure/disk-loss/types/types.go | 36 +-
 .../instance-stop/environment/environment.go | 9 +-
 pkg/azure/instance-stop/types/types.go | 7 -
 .../environment/environment.go | 1 -
 .../redfish-node-restart/types/types.go | 32 +-
 pkg/baremetal/redfish/redfish.go | 35 +-
 pkg/cassandra/liveness.go | 46 +-
 pkg/cassandra/node-tools.go | 20 +-
 .../pod-delete/environment/environment.go | 1 -
 pkg/cerrors/custom_errors.go | 105 ++++
 pkg/cloud/aws/ebs/ebs-operations.go | 51 +-
 pkg/cloud/aws/ebs/ebs-volume-state.go | 79 ++-
 pkg/cloud/aws/ec2/ec2-instance-status.go | 78 ++-
 pkg/cloud/aws/ec2/ec2-operations.go | 52 +-
 pkg/cloud/aws/ssm/ssm-documentation.go | 25 +-
 pkg/cloud/aws/ssm/ssm-operations.go | 79 ++-
 pkg/cloud/azure/common/common.go | 27 +-
 pkg/cloud/azure/disk/disk-operations.go | 105 ++--
 pkg/cloud/azure/disk/disk-status.go | 110 ++--
 .../azure/instance/instance-operations.go | 71 ++-
 pkg/cloud/azure/instance/instance-status.go | 103 +++-
 pkg/cloud/gcp/disk-operations.go | 21 +-
 pkg/cloud/gcp/disk-volume-status.go | 34 +-
 pkg/cloud/gcp/get-credentials-json.go | 10 +-
 pkg/cloud/gcp/vm-instance-status.go | 13 +-
 pkg/cloud/gcp/vm-operations.go | 25 +-
 pkg/cloud/vmware/get-vcenter-cookie.go | 38 +-
 pkg/cloud/vmware/vm-operations.go | 113 +++-
 pkg/cloud/vmware/vm-status.go | 68 ++-
 .../environment/environment.go | 1 -
 pkg/gcp/gcp-vm-disk-loss/types/types.go | 1 -
 .../environment/environment.go | 1 -
 pkg/gcp/gcp-vm-instance-stop/types/types.go | 1 -
 .../container-kill/environment/environment.go | 1 -
 pkg/generic/container-kill/types/types.go | 1 -
 .../disk-fill/environment/environment.go | 1 -
 pkg/generic/disk-fill/types/types.go | 1 -
 .../environment/environment.go | 1 -
 .../docker-service-kill/types/types.go | 1 -
 .../http-chaos/environment/environment.go | 1 -
 pkg/generic/http-chaos/types/types.go | 1 -
 .../environment/environment.go | 1 -
 .../kubelet-service-kill/types/types.go | 1 -
 .../network-chaos/environment/environment.go | 2 -
 pkg/generic/network-chaos/types/types.go | 2 -
 .../node-cpu-hog/environment/environment.go | 1 -
 pkg/generic/node-cpu-hog/types/types.go | 1 -
 .../node-drain/environment/environment.go | 1 -
 pkg/generic/node-drain/types/types.go | 1 -
 .../node-io-stress/environment/environment.go | 1 -
 pkg/generic/node-io-stress/types/types.go | 1 -
 .../environment/environment.go | 1 -
 pkg/generic/node-memory-hog/types/types.go | 1 -
 .../node-restart/environment/environment.go | 1 -
 pkg/generic/node-restart/types/types.go | 1 -
 .../node-taint/environment/environment.go | 1 -
 pkg/generic/node-taint/types/types.go | 1 -
 .../pod-autoscaler/environment/environment.go | 1 -
 pkg/generic/pod-autoscaler/types/types.go | 1 -
 .../environment/environment.go | 1 -
 pkg/generic/pod-cpu-hog-exec/types/types.go | 1 -
 .../pod-delete/environment/environment.go | 1 -
 pkg/generic/pod-delete/types/types.go | 1 -
 .../pod-dns-chaos/environment/environment.go | 1 -
 pkg/generic/pod-dns-chaos/types/types.go | 1 -
 .../pod-fio-stress/environment/environment.go | 1 -
 pkg/generic/pod-fio-stress/types/types.go | 1 -
 .../environment/environment.go | 1 -
 .../pod-memory-hog-exec/types/types.go | 1 -
 .../environment/environment.go | 1 -
 .../pod-network-partition/types/types.go | 1 -
 .../stress-chaos/environment/environment.go | 2 -
 pkg/generic/stress-chaos/types/types.go | 2 -
 pkg/kafka/environment/environment.go | 5 +-
 pkg/kafka/kafka-liveness-cleanup.go | 9 +-
 pkg/kafka/kafka-liveness-stream.go | 15 +-
 .../ebs-loss/environment/environment.go | 8 +-
 pkg/kube-aws/ebs-loss/types/types.go | 4 -
 .../environment/environment.go | 8 +-
 .../ec2-terminate-by-id/types/types.go | 36 +-
 .../environment/environment.go | 8 +-
 .../ec2-terminate-by-tag/types/types.go | 4 -
 pkg/probe/cmdprobe.go | 72 +--
 pkg/probe/comparator/comparator.go | 29 +-
 pkg/probe/comparator/float.go | 25 +-
 pkg/probe/comparator/integer.go | 25 +-
 pkg/probe/comparator/string.go | 23 +-
 pkg/probe/httpprobe.go | 32 +-
 pkg/probe/k8sprobe.go | 46 +-
 pkg/probe/probe.go | 61 ++-
 pkg/probe/promProbe.go | 31 +-
 pkg/result/chaosresult.go | 138 ++++-
 .../environment/environment.go | 1 -
 .../spring-boot-chaos/types/types.go | 1 -
 pkg/status/application.go | 71 +--
 pkg/status/nodes.go | 21 +-
 pkg/types/types.go | 23 +-
 pkg/utils/common/common.go | 33 +-
 pkg/utils/common/nodes.go | 92 +---
 pkg/utils/common/pid.go | 85 +--
 pkg/utils/common/pods.go | 143 ++---
 pkg/utils/exec/exec.go | 35 +-
 .../vm-poweroff/environment/environment.go | 8 +-
 pkg/vmware/vm-poweroff/types/types.go | 35 +-
 pkg/workloads/workloads.go | 50 +-
 218 files changed, 3939 insertions(+), 6187 deletions(-)
 delete mode 100644 chaoslib/powerfulseal/pod-delete/lib/pod-delete.go
 delete mode 100644 chaoslib/pumba/container-kill/lib/container-kill.go
 delete mode 100644 chaoslib/pumba/cpu-chaos/lib/cpu-chaos.go
 delete mode 100644 chaoslib/pumba/memory-chaos/lib/memory-chaos.go
 delete mode 100644 chaoslib/pumba/network-chaos/lib/corruption/corruption.go
 delete
mode 100644 chaoslib/pumba/network-chaos/lib/duplication/duplication.go delete mode 100644 chaoslib/pumba/network-chaos/lib/latency/latency.go delete mode 100644 chaoslib/pumba/network-chaos/lib/loss/loss.go delete mode 100644 chaoslib/pumba/network-chaos/lib/network-chaos.go delete mode 100644 chaoslib/pumba/pod-io-stress/lib/pod-io-stress.go create mode 100644 pkg/cerrors/custom_errors.go diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 0c70bdb7d..806f712a7 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -12,7 +12,7 @@ jobs: # Install golang - uses: actions/setup-go@v2 with: - go-version: 1.17 + go-version: 1.18 - uses: actions/checkout@v2 with: @@ -49,7 +49,7 @@ jobs: - run: snyk auth ${SNYK_TOKEN} - uses: actions/setup-go@v1 with: - go-version: '1.17' + go-version: '1.18' - name: Snyk monitor run: snyk test @@ -60,7 +60,7 @@ jobs: # Install golang - uses: actions/setup-go@v2 with: - go-version: 1.17 + go-version: 1.18 - uses: actions/checkout@v2 with: diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml index 4c44ce5a4..e093af663 100644 --- a/.github/workflows/push.yml +++ b/.github/workflows/push.yml @@ -13,7 +13,7 @@ jobs: # Install golang - uses: actions/setup-go@v2 with: - go-version: 1.17 + go-version: 1.18 - uses: actions/checkout@v2 #TODO: Add Dockerfile linting @@ -43,7 +43,7 @@ jobs: # Install golang - uses: actions/setup-go@v2 with: - go-version: 1.17 + go-version: 1.18 - uses: actions/checkout@v2 - name: Set up QEMU diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index d939237c0..3986f7c5e 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -13,7 +13,7 @@ jobs: # Install golang - uses: actions/setup-go@v2 with: - go-version: 1.17 + go-version: 1.18 - uses: actions/checkout@v2 #TODO: Add Dockerfile linting @@ -30,7 +30,7 @@ jobs: # Install golang - uses: actions/setup-go@v2 with: - go-version: 1.17 + go-version: 1.18 - uses: actions/checkout@v2 - name: Set Tag diff --git a/.github/workflows/run-e2e-on-pr-commits.yml b/.github/workflows/run-e2e-on-pr-commits.yml index f76bad262..3837eb0bf 100644 --- a/.github/workflows/run-e2e-on-pr-commits.yml +++ b/.github/workflows/run-e2e-on-pr-commits.yml @@ -217,7 +217,7 @@ jobs: # Install golang - uses: actions/setup-go@v2 with: - go-version: '1.17' + go-version: '1.18' - uses: actions/checkout@v2 with: @@ -260,7 +260,7 @@ jobs: # Install golang - uses: actions/setup-go@v2 with: - go-version: '1.17' + go-version: '1.18' - uses: actions/checkout@v2 with: @@ -302,7 +302,7 @@ jobs: # Install golang - uses: actions/setup-go@v2 with: - go-version: '1.17' + go-version: '1.18' - uses: actions/checkout@v2 with: diff --git a/build/Dockerfile b/build/Dockerfile index 5840d24a9..16aca31f9 100644 --- a/build/Dockerfile +++ b/build/Dockerfile @@ -1,6 +1,6 @@ # Multi-stage docker build # Build stage -FROM golang:1.17 AS builder +FROM golang:1.18 AS builder ARG TARGETOS=linux ARG TARGETARCH diff --git a/chaoslib/litmus/aws-ssm-chaos/lib/ssm-chaos.go b/chaoslib/litmus/aws-ssm-chaos/lib/ssm-chaos.go index 37edc70a1..9205e7f53 100644 --- a/chaoslib/litmus/aws-ssm-chaos/lib/ssm-chaos.go +++ b/chaoslib/litmus/aws-ssm-chaos/lib/ssm-chaos.go @@ -13,7 +13,7 @@ import ( "github.com/litmuschaos/litmus-go/pkg/probe" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" - "github.com/pkg/errors" + "github.com/palantir/stacktrace" ) // InjectChaosInSerialMode will inject the aws ssm chaos in 
serial mode that is one after other @@ -46,7 +46,7 @@ func InjectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai ec2IDList := strings.Fields(ec2ID) commandId, err := ssm.SendSSMCommand(experimentsDetails, ec2IDList) if err != nil { - return errors.Errorf("fail to send ssm command, err: %v", err) + return stacktrace.Propagate(err, "failed to send ssm command") } //prepare commands for abort recovery experimentsDetails.CommandIDs = append(experimentsDetails.CommandIDs, commandId) @@ -54,21 +54,21 @@ func InjectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai //wait for the ssm command to get in running state log.Info("[Wait]: Waiting for the ssm command to get in InProgress state") if err := ssm.WaitForCommandStatus("InProgress", commandId, ec2ID, experimentsDetails.Region, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, experimentsDetails.Delay); err != nil { - return errors.Errorf("fail to start ssm command, err: %v", err) + return stacktrace.Propagate(err, "failed to start ssm command") } common.SetTargets(ec2ID, "injected", "EC2", chaosDetails) // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 && i == 0 { if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { - return err + return stacktrace.Propagate(err, "failed to run probes") } } //wait for the ssm command to get succeeded in the given chaos duration log.Info("[Wait]: Waiting for the ssm command to get completed") if err := ssm.WaitForCommandStatus("Success", commandId, ec2ID, experimentsDetails.Region, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, experimentsDetails.Delay); err != nil { - return errors.Errorf("fail to send ssm command, err: %v", err) + return stacktrace.Propagate(err, "failed to send ssm command") } common.SetTargets(ec2ID, "reverted", "EC2", chaosDetails) @@ -110,7 +110,7 @@ func InjectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet log.Info("[Chaos]: Starting the ssm command") commandId, err := ssm.SendSSMCommand(experimentsDetails, instanceIDList) if err != nil { - return errors.Errorf("fail to send ssm command, err: %v", err) + return stacktrace.Propagate(err, "failed to send ssm command") } //prepare commands for abort recovery experimentsDetails.CommandIDs = append(experimentsDetails.CommandIDs, commandId) @@ -119,14 +119,14 @@ func InjectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet //wait for the ssm command to get in running state log.Info("[Wait]: Waiting for the ssm command to get in InProgress state") if err := ssm.WaitForCommandStatus("InProgress", commandId, ec2ID, experimentsDetails.Region, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, experimentsDetails.Delay); err != nil { - return errors.Errorf("fail to start ssm command, err: %v", err) + return stacktrace.Propagate(err, "failed to start ssm command") } } // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { - return err + return stacktrace.Propagate(err, "failed to run probes") } } @@ -134,7 +134,7 @@ func InjectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet //wait for the ssm command to get succeeded in the given chaos duration log.Info("[Wait]: Waiting for the ssm command to get completed") if err := ssm.WaitForCommandStatus("Success", commandId, ec2ID, experimentsDetails.Region, 
experimentsDetails.ChaosDuration+experimentsDetails.Timeout, experimentsDetails.Delay); err != nil { - return errors.Errorf("fail to send ssm command, err: %v", err) + return stacktrace.Propagate(err, "failed to send ssm command") } } @@ -159,14 +159,14 @@ func AbortWatcher(experimentsDetails *experimentTypes.ExperimentDetails, abort c case len(experimentsDetails.CommandIDs) != 0: for _, commandId := range experimentsDetails.CommandIDs { if err := ssm.CancelCommand(commandId, experimentsDetails.Region); err != nil { - log.Errorf("[Abort]: fail to cancle command, recovery failed, err: %v", err) + log.Errorf("[Abort]: Failed to cancel command, recovery failed: %v", err) } } default: - log.Info("[Abort]: No command found to cancle") + log.Info("[Abort]: No SSM Command found to cancel") } if err := ssm.SSMDeleteDocument(experimentsDetails.DocumentName, experimentsDetails.Region); err != nil { - log.Errorf("fail to delete ssm doc, err: %v", err) + log.Errorf("Failed to delete ssm document: %v", err) } log.Info("[Abort]: Chaos Revert Completed") os.Exit(1) diff --git a/chaoslib/litmus/aws-ssm-chaos/lib/ssm/aws-ssm-chaos-by-id.go b/chaoslib/litmus/aws-ssm-chaos/lib/ssm/aws-ssm-chaos-by-id.go index 9ca7b8404..0eb99d158 100644 --- a/chaoslib/litmus/aws-ssm-chaos/lib/ssm/aws-ssm-chaos-by-id.go +++ b/chaoslib/litmus/aws-ssm-chaos/lib/ssm/aws-ssm-chaos-by-id.go @@ -1,6 +1,7 @@ package ssm import ( + "fmt" "os" "os/signal" "strings" @@ -8,12 +9,13 @@ import ( "github.com/litmuschaos/litmus-go/chaoslib/litmus/aws-ssm-chaos/lib" experimentTypes "github.com/litmuschaos/litmus-go/pkg/aws-ssm/aws-ssm-chaos/types" + "github.com/litmuschaos/litmus-go/pkg/cerrors" clients "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/cloud/aws/ssm" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" - "github.com/pkg/errors" + "github.com/palantir/stacktrace" ) var ( @@ -21,7 +23,7 @@ var ( inject, abort chan os.Signal ) -//PrepareAWSSSMChaosByID contains the prepration and injection steps for the experiment +// PrepareAWSSSMChaosByID contains the prepration and injection steps for the experiment func PrepareAWSSSMChaosByID(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { // inject channel is used to transmit signal notifications. 
@@ -42,7 +44,7 @@ func PrepareAWSSSMChaosByID(experimentsDetails *experimentTypes.ExperimentDetail //create and upload the ssm document on the given aws service monitoring docs if err = ssm.CreateAndUploadDocument(experimentsDetails.DocumentName, experimentsDetails.DocumentType, experimentsDetails.DocumentFormat, experimentsDetails.DocumentPath, experimentsDetails.Region); err != nil { - return errors.Errorf("fail to create and upload ssm doc, err: %v", err) + return stacktrace.Propagate(err, "could not create and upload the ssm document") } experimentsDetails.IsDocsUploaded = true log.Info("[Info]: SSM docs uploaded successfully") @@ -52,27 +54,27 @@ func PrepareAWSSSMChaosByID(experimentsDetails *experimentTypes.ExperimentDetail //get the instance id or list of instance ids instanceIDList := strings.Split(experimentsDetails.EC2InstanceID, ",") - if len(instanceIDList) == 0 { - return errors.Errorf("no instance id found for chaos injection") + if experimentsDetails.EC2InstanceID == "" || len(instanceIDList) == 0 { + return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "no instance id found for chaos injection"} } switch strings.ToLower(experimentsDetails.Sequence) { case "serial": if err = lib.InjectChaosInSerialMode(experimentsDetails, instanceIDList, clients, resultDetails, eventsDetails, chaosDetails, inject); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": if err = lib.InjectChaosInParallelMode(experimentsDetails, instanceIDList, clients, resultDetails, eventsDetails, chaosDetails, inject); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.Sequence) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)} } //Delete the ssm document on the given aws service monitoring docs err = ssm.SSMDeleteDocument(experimentsDetails.DocumentName, experimentsDetails.Region) if err != nil { - return errors.Errorf("fail to delete ssm doc, err: %v", err) + return stacktrace.Propagate(err, "failed to delete ssm doc") } //Waiting for the ramp time after chaos injection diff --git a/chaoslib/litmus/aws-ssm-chaos/lib/ssm/aws-ssm-chaos-by-tag.go b/chaoslib/litmus/aws-ssm-chaos/lib/ssm/aws-ssm-chaos-by-tag.go index 43e31d92c..99884e697 100644 --- a/chaoslib/litmus/aws-ssm-chaos/lib/ssm/aws-ssm-chaos-by-tag.go +++ b/chaoslib/litmus/aws-ssm-chaos/lib/ssm/aws-ssm-chaos-by-tag.go @@ -1,6 +1,7 @@ package ssm import ( + "fmt" "os" "os/signal" "strings" @@ -8,15 +9,16 @@ import ( "github.com/litmuschaos/litmus-go/chaoslib/litmus/aws-ssm-chaos/lib" experimentTypes "github.com/litmuschaos/litmus-go/pkg/aws-ssm/aws-ssm-chaos/types" + "github.com/litmuschaos/litmus-go/pkg/cerrors" clients "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/cloud/aws/ssm" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" - "github.com/pkg/errors" + "github.com/palantir/stacktrace" ) -//PrepareAWSSSMChaosByTag contains the prepration and injection steps for the experiment +// PrepareAWSSSMChaosByTag contains the prepration and injection steps for the experiment func PrepareAWSSSMChaosByTag(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails 
*types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { // inject channel is used to transmit signal notifications. @@ -37,7 +39,7 @@ func PrepareAWSSSMChaosByTag(experimentsDetails *experimentTypes.ExperimentDetai //create and upload the ssm document on the given aws service monitoring docs if err = ssm.CreateAndUploadDocument(experimentsDetails.DocumentName, experimentsDetails.DocumentType, experimentsDetails.DocumentFormat, experimentsDetails.DocumentPath, experimentsDetails.Region); err != nil { - return errors.Errorf("fail to create and upload ssm doc, err: %v", err) + return stacktrace.Propagate(err, "could not create and upload the ssm document") } experimentsDetails.IsDocsUploaded = true log.Info("[Info]: SSM docs uploaded successfully") @@ -48,26 +50,26 @@ func PrepareAWSSSMChaosByTag(experimentsDetails *experimentTypes.ExperimentDetai log.Infof("[Chaos]:Number of Instance targeted: %v", len(instanceIDList)) if len(instanceIDList) == 0 { - return errors.Errorf("no instance id found for chaos injection") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "no instance id found for chaos injection"} } switch strings.ToLower(experimentsDetails.Sequence) { case "serial": if err = lib.InjectChaosInSerialMode(experimentsDetails, instanceIDList, clients, resultDetails, eventsDetails, chaosDetails, inject); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": if err = lib.InjectChaosInParallelMode(experimentsDetails, instanceIDList, clients, resultDetails, eventsDetails, chaosDetails, inject); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.Sequence) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)} } //Delete the ssm document on the given aws service monitoring docs err = ssm.SSMDeleteDocument(experimentsDetails.DocumentName, experimentsDetails.Region) if err != nil { - return errors.Errorf("fail to delete ssm doc, err: %v", err) + return stacktrace.Propagate(err, "failed to delete ssm doc") } //Waiting for the ramp time after chaos injection diff --git a/chaoslib/litmus/azure-disk-loss/lib/azure-disk-loss.go b/chaoslib/litmus/azure-disk-loss/lib/azure-disk-loss.go index a2d672bc0..aa2c16ee8 100644 --- a/chaoslib/litmus/azure-disk-loss/lib/azure-disk-loss.go +++ b/chaoslib/litmus/azure-disk-loss/lib/azure-disk-loss.go @@ -1,6 +1,7 @@ package lib import ( + "fmt" "os" "os/signal" "strings" @@ -9,6 +10,7 @@ import ( "github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute" experimentTypes "github.com/litmuschaos/litmus-go/pkg/azure/disk-loss/types" + "github.com/litmuschaos/litmus-go/pkg/cerrors" clients "github.com/litmuschaos/litmus-go/pkg/clients" diskStatus "github.com/litmuschaos/litmus-go/pkg/cloud/azure/disk" instanceStatus "github.com/litmuschaos/litmus-go/pkg/cloud/azure/instance" @@ -18,7 +20,7 @@ import ( "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" "github.com/litmuschaos/litmus-go/pkg/utils/retry" - "github.com/pkg/errors" + "github.com/palantir/stacktrace" ) var ( @@ -26,7 +28,7 @@ var ( inject, abort chan os.Signal ) -//PrepareChaos contains the prepration and injection steps for the experiment +// PrepareChaos contains the prepration and 
injection steps for the experiment func PrepareChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { // inject channel is used to transmit signal notifications. @@ -47,13 +49,13 @@ func PrepareChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients //get the disk name or list of disk names diskNameList := strings.Split(experimentsDetails.VirtualDiskNames, ",") - if len(diskNameList) == 0 { - return errors.Errorf("no volume names found to detach") + if experimentsDetails.VirtualDiskNames == "" || len(diskNameList) == 0 { + return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "no volume names found to detach"} } instanceNamesWithDiskNames, err := diskStatus.GetInstanceNameForDisks(diskNameList, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup) if err != nil { - return errors.Errorf("error fetching attached instances for disks, err: %v", err) + return stacktrace.Propagate(err, "error fetching attached instances for disks") } // Get the instance name with attached disks @@ -62,7 +64,7 @@ func PrepareChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients for instanceName := range instanceNamesWithDiskNames { attachedDisksWithInstance[instanceName], err = diskStatus.GetInstanceDiskList(experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, experimentsDetails.ScaleSet, instanceName) if err != nil { - return errors.Errorf("error fetching virtual disks, err: %v", err) + return stacktrace.Propagate(err, "error fetching virtual disks") } } @@ -78,14 +80,14 @@ func PrepareChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients switch strings.ToLower(experimentsDetails.Sequence) { case "serial": if err = injectChaosInSerialMode(experimentsDetails, instanceNamesWithDiskNames, attachedDisksWithInstance, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": if err = injectChaosInParallelMode(experimentsDetails, instanceNamesWithDiskNames, attachedDisksWithInstance, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.Sequence) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)} } //Waiting for the ramp time after chaos injection @@ -97,7 +99,7 @@ func PrepareChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients return nil } -// injectChaosInParallelMode will inject the azure disk loss chaos in parallel mode that is all at once +// injectChaosInParallelMode will inject the Azure disk loss chaos in parallel mode that is all at once func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, instanceNamesWithDiskNames map[string][]string, attachedDisksWithInstance map[string]*[]compute.DataDisk, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { //ChaosStartTimeStamp contains the start timestamp, when the chaos injection begin @@ -107,7 +109,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet for duration < 
experimentsDetails.ChaosDuration { if experimentsDetails.EngineName != "" { - msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on azure virtual disk" + msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on Azure virtual disk" types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails) events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine") } @@ -116,7 +118,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet log.Info("[Chaos]: Detaching the virtual disks from the instances") for instanceName, diskNameList := range instanceNamesWithDiskNames { if err = diskStatus.DetachDisks(experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, instanceName, experimentsDetails.ScaleSet, diskNameList); err != nil { - return errors.Errorf("failed to detach disks, err: %v", err) + return stacktrace.Propagate(err, "failed to detach disks") } } // Waiting for disk to be detached @@ -124,7 +126,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet for _, diskName := range diskNameList { log.Infof("[Wait]: Waiting for Disk '%v' to detach", diskName) if err := diskStatus.WaitForDiskToDetach(experimentsDetails, diskName); err != nil { - return errors.Errorf("disk attach check failed, err: %v", err) + return stacktrace.Propagate(err, "disk detachment check failed") } } } @@ -138,7 +140,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { - return err + return stacktrace.Propagate(err, "failed to run probes") } } @@ -150,24 +152,24 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet log.Info("[Chaos]: Attaching the Virtual disks back to the instances") for instanceName, diskNameList := range attachedDisksWithInstance { if err = diskStatus.AttachDisk(experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, instanceName, experimentsDetails.ScaleSet, diskNameList); err != nil { - return errors.Errorf("virtual disk attachment failed, err: %v", err) + return stacktrace.Propagate(err, "virtual disk attachment failed") } - } - // Wait for disk to be attached - for _, diskNameList := range instanceNamesWithDiskNames { - for _, diskName := range diskNameList { - log.Infof("[Wait]: Waiting for Disk '%v' to attach", diskName) - if err := diskStatus.WaitForDiskToAttach(experimentsDetails, diskName); err != nil { - return errors.Errorf("disk attach check failed, err: %v", err) + // Wait for disk to be attached + for _, diskNameList := range instanceNamesWithDiskNames { + for _, diskName := range diskNameList { + log.Infof("[Wait]: Waiting for Disk '%v' to attach", diskName) + if err := diskStatus.WaitForDiskToAttach(experimentsDetails, diskName); err != nil { + return stacktrace.Propagate(err, "disk attachment check failed") + } } } - } - // Updating the result details - for _, diskNameList := range instanceNamesWithDiskNames { - for _, diskName := range diskNameList { - common.SetTargets(diskName, "re-attached", "VirtualDisk", chaosDetails) + // Updating the result details + for _, diskNameList := range instanceNamesWithDiskNames { + for _, diskName := range diskNameList { + common.SetTargets(diskName, "re-attached", "VirtualDisk", chaosDetails) + } } } duration = int(time.Since(ChaosStartTimeStamp).Seconds()) @@ 
-175,7 +177,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet return nil } -//injectChaosInSerialMode will inject the azure disk loss chaos in serial mode that is one after other +// injectChaosInSerialMode will inject the Azure disk loss chaos in serial mode that is one after other func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, instanceNamesWithDiskNames map[string][]string, attachedDisksWithInstance map[string]*[]compute.DataDisk, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { //ChaosStartTimeStamp contains the start timestamp, when the chaos injection begin @@ -185,7 +187,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai for duration < experimentsDetails.ChaosDuration { if experimentsDetails.EngineName != "" { - msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on azure virtual disks" + msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on Azure virtual disks" types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails) events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine") } @@ -198,13 +200,13 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai // Detaching the virtual disks log.Infof("[Chaos]: Detaching %v from the instance", diskName) if err = diskStatus.DetachDisks(experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, instanceName, experimentsDetails.ScaleSet, diskNameToList); err != nil { - return errors.Errorf("failed to detach disks, err: %v", err) + return stacktrace.Propagate(err, "failed to detach disks") } // Waiting for disk to be detached log.Infof("[Wait]: Waiting for Disk '%v' to detach", diskName) if err := diskStatus.WaitForDiskToDetach(experimentsDetails, diskName); err != nil { - return errors.Errorf("disk detach check failed, err: %v", err) + return stacktrace.Propagate(err, "disk detachment check failed") } common.SetTargets(diskName, "detached", "VirtualDisk", chaosDetails) @@ -213,7 +215,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai // the OnChaos probes execution will start in the first iteration and keep running for the entire chaos duration if len(resultDetails.ProbeDetails) != 0 && i == 0 { if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { - return err + return stacktrace.Propagate(err, "failed to run probes") } } @@ -224,13 +226,13 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai //Attaching the virtual disks to the instance log.Infof("[Chaos]: Attaching %v back to the instance", diskName) if err = diskStatus.AttachDisk(experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, instanceName, experimentsDetails.ScaleSet, attachedDisksWithInstance[instanceName]); err != nil { - return errors.Errorf("disk attachment failed, err: %v", err) + return stacktrace.Propagate(err, "disk attachment failed") } // Waiting for disk to be attached log.Infof("[Wait]: Waiting for Disk '%v' to attach", diskName) if err := diskStatus.WaitForDiskToAttach(experimentsDetails, diskName); err != nil { - return errors.Errorf("disk attach check failed, err: %v", err) + return stacktrace.Propagate(err, "disk attachment check failed") } common.SetTargets(diskName, "re-attached", "VirtualDisk", chaosDetails) @@ -257,10 
+259,10 @@ func abortWatcher(experimentsDetails *experimentTypes.ExperimentDetails, attache Try(func(attempt uint) error { status, err := instanceStatus.GetAzureInstanceProvisionStatus(experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, instanceName, experimentsDetails.ScaleSet) if err != nil { - return errors.Errorf("Failed to get instance, err: %v", err) + return stacktrace.Propagate(err, "failed to get instance") } if status != "Provisioning succeeded" { - return errors.Errorf("instance is updating, waiting for instance to finish update") + return stacktrace.Propagate(err, "instance is updating, waiting for instance to finish update") } return nil }) @@ -271,11 +273,11 @@ func abortWatcher(experimentsDetails *experimentTypes.ExperimentDetails, attache for _, disk := range *diskList { diskStatusString, err := diskStatus.GetDiskStatus(experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, *disk.Name) if err != nil { - log.Errorf("Failed to get disk status, err: %v", err) + log.Errorf("Failed to get disk status: %v", err) } if diskStatusString != "Attached" { if err := diskStatus.AttachDisk(experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, instanceName, experimentsDetails.ScaleSet, diskList); err != nil { - log.Errorf("failed to attach disk, manual revert required, err: %v", err) + log.Errorf("Failed to attach disk, manual revert required: %v", err) } else { common.SetTargets(*disk.Name, "re-attached", "VirtualDisk", chaosDetails) } diff --git a/chaoslib/litmus/azure-instance-stop/lib/azure-instance-stop.go b/chaoslib/litmus/azure-instance-stop/lib/azure-instance-stop.go index f15330682..8b3950da7 100644 --- a/chaoslib/litmus/azure-instance-stop/lib/azure-instance-stop.go +++ b/chaoslib/litmus/azure-instance-stop/lib/azure-instance-stop.go @@ -1,6 +1,7 @@ package lib import ( + "fmt" "os" "os/signal" "strings" @@ -8,6 +9,7 @@ import ( "time" experimentTypes "github.com/litmuschaos/litmus-go/pkg/azure/instance-stop/types" + "github.com/litmuschaos/litmus-go/pkg/cerrors" clients "github.com/litmuschaos/litmus-go/pkg/clients" azureCommon "github.com/litmuschaos/litmus-go/pkg/cloud/azure/common" azureStatus "github.com/litmuschaos/litmus-go/pkg/cloud/azure/instance" @@ -16,7 +18,7 @@ import ( "github.com/litmuschaos/litmus-go/pkg/probe" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" - "github.com/pkg/errors" + "github.com/palantir/stacktrace" ) var ( @@ -44,8 +46,8 @@ func PrepareAzureStop(experimentsDetails *experimentTypes.ExperimentDetails, cli // get the instance name or list of instance names instanceNameList := strings.Split(experimentsDetails.AzureInstanceNames, ",") - if len(instanceNameList) == 0 { - return errors.Errorf("no instance name found to stop") + if experimentsDetails.AzureInstanceNames == "" || len(instanceNameList) == 0 { + return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "no instance name found to stop"} } // watching for the abort signal and revert the chaos @@ -54,14 +56,14 @@ func PrepareAzureStop(experimentsDetails *experimentTypes.ExperimentDetails, cli switch strings.ToLower(experimentsDetails.Sequence) { case "serial": if err = injectChaosInSerialMode(experimentsDetails, instanceNameList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": if err = injectChaosInParallelMode(experimentsDetails, instanceNameList, clients, 
resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.Sequence) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)} } // Waiting for the ramp time after chaos injection @@ -72,7 +74,7 @@ func PrepareAzureStop(experimentsDetails *experimentTypes.ExperimentDetails, cli return nil } -// injectChaosInSerialMode will inject the azure instance termination in serial mode that is one after the other +// injectChaosInSerialMode will inject the Azure instance termination in serial mode that is one after the other func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, instanceNameList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { select { case <-inject: @@ -88,7 +90,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai log.Infof("[Info]: Target instanceName list, %v", instanceNameList) if experimentsDetails.EngineName != "" { - msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on azure instance" + msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on Azure instance" types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails) events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine") } @@ -100,25 +102,25 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai log.Infof("[Chaos]: Stopping the Azure instance: %v", vmName) if experimentsDetails.ScaleSet == "enable" { if err := azureStatus.AzureScaleSetInstanceStop(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName); err != nil { - return errors.Errorf("unable to stop the Azure instance, err: %v", err) + return stacktrace.Propagate(err, "unable to stop the Azure instance") } } else { if err := azureStatus.AzureInstanceStop(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName); err != nil { - return errors.Errorf("unable to stop the Azure instance, err: %v", err) + return stacktrace.Propagate(err, "unable to stop the Azure instance") } } // Wait for Azure instance to completely stop log.Infof("[Wait]: Waiting for Azure instance '%v' to get in the stopped state", vmName) if err := azureStatus.WaitForAzureComputeDown(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.ScaleSet, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName); err != nil { - return errors.Errorf("instance poweroff status check failed, err: %v", err) + return stacktrace.Propagate(err, "instance poweroff status check failed") } // Run the probes during chaos // the OnChaos probes execution will start in the first iteration and keep running for the entire chaos duration if len(resultDetails.ProbeDetails) != 0 && i == 0 { if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { - return err + return stacktrace.Propagate(err, "failed to run probes") } } @@ -130,18 +132,18 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai log.Info("[Chaos]: Starting back the Azure instance") if 
experimentsDetails.ScaleSet == "enable" { if err := azureStatus.AzureScaleSetInstanceStart(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName); err != nil { - return errors.Errorf("unable to start the Azure instance, err: %v", err) + return stacktrace.Propagate(err, "unable to start the Azure instance") } } else { if err := azureStatus.AzureInstanceStart(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName); err != nil { - return errors.Errorf("unable to start the Azure instance, err: %v", err) + return stacktrace.Propagate(err, "unable to start the Azure instance") } } // Wait for Azure instance to get in running state log.Infof("[Wait]: Waiting for Azure instance '%v' to get in the running state", vmName) if err := azureStatus.WaitForAzureComputeUp(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.ScaleSet, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName); err != nil { - return errors.Errorf("instance power on status check failed, err: %v", err) + return stacktrace.Propagate(err, "instance power on status check failed") } } duration = int(time.Since(ChaosStartTimeStamp).Seconds()) @@ -150,7 +152,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai return nil } -// injectChaosInParallelMode will inject the azure instance termination in parallel mode that is all at once +// injectChaosInParallelMode will inject the Azure instance termination in parallel mode that is all at once func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, instanceNameList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { select { case <-inject: @@ -177,11 +179,11 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet log.Infof("[Chaos]: Stopping the Azure instance: %v", vmName) if experimentsDetails.ScaleSet == "enable" { if err := azureStatus.AzureScaleSetInstanceStop(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName); err != nil { - return errors.Errorf("unable to stop azure instance, err: %v", err) + return stacktrace.Propagate(err, "unable to stop Azure instance") } } else { if err := azureStatus.AzureInstanceStop(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName); err != nil { - return errors.Errorf("unable to stop azure instance, err: %v", err) + return stacktrace.Propagate(err, "unable to stop Azure instance") } } } @@ -190,14 +192,14 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet for _, vmName := range instanceNameList { log.Infof("[Wait]: Waiting for Azure instance '%v' to get in the stopped state", vmName) if err := azureStatus.WaitForAzureComputeDown(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.ScaleSet, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName); err != nil { - return errors.Errorf("instance poweroff status check failed, err: %v", err) + return stacktrace.Propagate(err, "instance poweroff status check failed") } } // Run probes during chaos if len(resultDetails.ProbeDetails) != 0 { if err = probe.RunProbes(chaosDetails, clients, resultDetails, 
"DuringChaos", eventsDetails); err != nil { - return err + return stacktrace.Propagate(err, "failed to run probes") } } @@ -210,11 +212,11 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet log.Infof("[Chaos]: Starting back the Azure instance: %v", vmName) if experimentsDetails.ScaleSet == "enable" { if err := azureStatus.AzureScaleSetInstanceStart(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName); err != nil { - return errors.Errorf("unable to start the Azure instance, err: %v", err) + return stacktrace.Propagate(err, "unable to start the Azure instance") } } else { if err := azureStatus.AzureInstanceStart(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName); err != nil { - return errors.Errorf("unable to start the Azure instance, err: %v", err) + return stacktrace.Propagate(err, "unable to start the Azure instance") } } } @@ -223,7 +225,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet for _, vmName := range instanceNameList { log.Infof("[Wait]: Waiting for Azure instance '%v' to get in the running state", vmName) if err := azureStatus.WaitForAzureComputeUp(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.ScaleSet, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName); err != nil { - return errors.Errorf("instance power on status check failed, err: %v", err) + return stacktrace.Propagate(err, "instance power on status check failed") } } @@ -248,22 +250,22 @@ func abortWatcher(experimentsDetails *experimentTypes.ExperimentDetails, instanc instanceState, err = azureStatus.GetAzureInstanceStatus(experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName) } if err != nil { - log.Errorf("[Abort]: Fail to get instance status when an abort signal is received, err: %v", err) + log.Errorf("[Abort]: Failed to get instance status when an abort signal is received: %v", err) } if instanceState != "VM running" && instanceState != "VM starting" { log.Info("[Abort]: Waiting for the Azure instance to get down") if err := azureStatus.WaitForAzureComputeDown(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.ScaleSet, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName); err != nil { - log.Errorf("[Abort]: Instance power off status check failed, err: %v", err) + log.Errorf("[Abort]: Instance power off status check failed: %v", err) } log.Info("[Abort]: Starting Azure instance as abort signal received") if experimentsDetails.ScaleSet == "enable" { if err := azureStatus.AzureScaleSetInstanceStart(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName); err != nil { - log.Errorf("[Abort]: Unable to start the Azure instance, err: %v", err) + log.Errorf("[Abort]: Unable to start the Azure instance: %v", err) } } else { if err := azureStatus.AzureInstanceStart(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName); err != nil { - log.Errorf("[Abort]: Unable to start the Azure instance, err: %v", err) + log.Errorf("[Abort]: Unable to start the Azure instance: %v", err) } } } @@ -271,7 +273,7 @@ func abortWatcher(experimentsDetails *experimentTypes.ExperimentDetails, instanc log.Info("[Abort]: Waiting for the Azure instance to start") 
err := azureStatus.WaitForAzureComputeUp(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.ScaleSet, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName) if err != nil { - log.Errorf("[Abort]: Instance power on status check failed, err: %v", err) + log.Errorf("[Abort]: Instance power on status check failed: %v", err) log.Errorf("[Abort]: Azure instance %v failed to start after an abort signal is received", vmName) } } diff --git a/chaoslib/litmus/container-kill/helper/container-kill.go b/chaoslib/litmus/container-kill/helper/container-kill.go index 5341b5701..d92b15956 100644 --- a/chaoslib/litmus/container-kill/helper/container-kill.go +++ b/chaoslib/litmus/container-kill/helper/container-kill.go @@ -4,7 +4,9 @@ import ( "bytes" "context" "fmt" + "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/result" + "github.com/palantir/stacktrace" "github.com/sirupsen/logrus" "os/exec" "strconv" @@ -17,7 +19,6 @@ import ( "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" "github.com/litmuschaos/litmus-go/pkg/utils/retry" - "github.com/pkg/errors" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientTypes "k8s.io/apimachinery/pkg/types" ) @@ -36,14 +37,18 @@ func Helper(clients clients.ClientSets) { log.Info("[PreReq]: Getting the ENV variables") getENV(&experimentsDetails) - // Intialise the chaos attributes + // Initialise the chaos attributes types.InitialiseChaosVariables(&chaosDetails) + chaosDetails.Phase = types.ChaosInjectPhase - // Intialise Chaos Result Parameters + // Initialise Chaos Result Parameters types.SetResultAttributes(&resultDetails, chaosDetails) - err := killContainer(&experimentsDetails, clients, &eventsDetails, &chaosDetails, &resultDetails) - if err != nil { + if err := killContainer(&experimentsDetails, clients, &eventsDetails, &chaosDetails, &resultDetails); err != nil { + // update failstep inside chaosresult + if resultErr := result.UpdateFailedStepFromHelper(&resultDetails, &chaosDetails, clients, err); resultErr != nil { + log.Fatalf("helper pod failed, err: %v, resultErr: %v", err, resultErr) + } log.Fatalf("helper pod failed, err: %v", err) } } @@ -52,9 +57,9 @@ func Helper(clients clients.ClientSets) { // it will kill the container till the chaos duration // the execution will stop after timestamp passes the given chaos duration func killContainer(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails) error { - targetList, err := common.ParseTargets() + targetList, err := common.ParseTargets(chaosDetails.ChaosPodName) if err != nil { - return err + return stacktrace.Propagate(err, "could not parse targets") } var targets []targetDetails @@ -64,6 +69,7 @@ func killContainer(experimentsDetails *experimentTypes.ExperimentDetails, client Name: t.Name, Namespace: t.Namespace, TargetContainer: t.TargetContainer, + Source: chaosDetails.ChaosPodName, } targets = append(targets, td) log.Infof("Injecting chaos on target: {name: %s, namespace: %v, container: %v}", t.Name, t.Namespace, t.TargetContainer) @@ -90,12 +96,12 @@ func killIterations(targets []targetDetails, experimentsDetails *experimentTypes for _, t := range targets { t.RestartCountBefore, err = getRestartCount(t, clients) if err != nil { - return err + return stacktrace.Propagate(err, "could get container restart count") } - containerId, err := 
common.GetContainerID(t.Namespace, t.Name, t.TargetContainer, clients) + containerId, err := common.GetContainerID(t.Namespace, t.Name, t.TargetContainer, clients, t.Source) if err != nil { - return err + return stacktrace.Propagate(err, "could not get container id") } log.InfoWithValues("[Info]: Details of application under chaos injection", logrus.Fields{ @@ -108,7 +114,7 @@ func killIterations(targets []targetDetails, experimentsDetails *experimentTypes } if err := kill(experimentsDetails, containerIds, clients, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not kill target container") } //Waiting for the chaos interval after chaos injection @@ -119,10 +125,10 @@ func killIterations(targets []targetDetails, experimentsDetails *experimentTypes for _, t := range targets { if err := validate(t, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { - return err + return stacktrace.Propagate(err, "could not verify restart count") } if err := result.AnnotateChaosResult(resultDetails.Name, chaosDetails.ChaosNamespace, "targeted", "pod", t.Name); err != nil { - return err + return stacktrace.Propagate(err, "could not annotate chaosresult") } } @@ -142,23 +148,22 @@ func kill(experimentsDetails *experimentTypes.ExperimentDetails, containerIds [] switch experimentsDetails.ContainerRuntime { case "docker": - if err := stopDockerContainer(containerIds, experimentsDetails.SocketPath, experimentsDetails.Signal); err != nil { - return err + if err := stopDockerContainer(containerIds, experimentsDetails.SocketPath, experimentsDetails.Signal, experimentsDetails.ChaosPodName); err != nil { + return stacktrace.Propagate(err, "could not stop container") } case "containerd", "crio": - if err := stopContainerdContainer(containerIds, experimentsDetails.SocketPath, experimentsDetails.Signal); err != nil { - return err + if err := stopContainerdContainer(containerIds, experimentsDetails.SocketPath, experimentsDetails.Signal, experimentsDetails.ChaosPodName); err != nil { + return stacktrace.Propagate(err, "could not stop container") } default: - return errors.Errorf("%v container runtime not supported", experimentsDetails.ContainerRuntime) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: chaosDetails.ChaosPodName, Reason: fmt.Sprintf("unsupported container runtime %s", experimentsDetails.ContainerRuntime)} } return nil } func validate(t targetDetails, timeout, delay int, clients clients.ClientSets) error { //Check the status of restarted container - err := common.CheckContainerStatus(t.Namespace, t.Name, timeout, delay, clients) - if err != nil { + if err := common.CheckContainerStatus(t.Namespace, t.Name, timeout, delay, clients, t.Source); err != nil { return err } @@ -167,9 +172,9 @@ func validate(t targetDetails, timeout, delay int, clients clients.ClientSets) e } //stopContainerdContainer kill the application container -func stopContainerdContainer(containerIDs []string, socketPath, signal string) error { +func stopContainerdContainer(containerIDs []string, socketPath, signal, source string) error { if signal != "SIGKILL" && signal != "SIGTERM" { - return errors.Errorf("{%v} signal not supported, use either SIGTERM or SIGKILL", signal) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: source, Reason: fmt.Sprintf("unsupported signal %s, use either SIGTERM or SIGKILL", signal)} } cmd := exec.Command("sudo", "crictl", "-i", fmt.Sprintf("unix://%s", socketPath), "-r", fmt.Sprintf("unix://%s", 
socketPath), "stop") @@ -178,22 +183,24 @@ func stopContainerdContainer(containerIDs []string, socketPath, signal string) e } cmd.Args = append(cmd.Args, containerIDs...) - var errOut bytes.Buffer + var errOut, out bytes.Buffer cmd.Stderr = &errOut + cmd.Stdout = &out if err := cmd.Run(); err != nil { - return errors.Errorf("Unable to run command, err: %v; error output: %v", err, errOut.String()) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Source: source, Reason: fmt.Sprintf("failed to stop container :%s", out.String())} } return nil } //stopDockerContainer kill the application container -func stopDockerContainer(containerIDs []string, socketPath, signal string) error { - var errOut bytes.Buffer +func stopDockerContainer(containerIDs []string, socketPath, signal, source string) error { + var errOut, out bytes.Buffer cmd := exec.Command("sudo", "docker", "--host", fmt.Sprintf("unix://%s", socketPath), "kill", "--signal", signal) cmd.Args = append(cmd.Args, containerIDs...) cmd.Stderr = &errOut + cmd.Stdout = &out if err := cmd.Run(); err != nil { - return errors.Errorf("Unable to run command, err: %v; error output: %v", err, errOut.String()) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Source: source, Reason: fmt.Sprintf("failed to stop container :%s", out.String())} } return nil } @@ -202,7 +209,7 @@ func stopDockerContainer(containerIDs []string, socketPath, signal string) error func getRestartCount(target targetDetails, clients clients.ClientSets) (int, error) { pod, err := clients.KubeClient.CoreV1().Pods(target.Namespace).Get(context.Background(), target.Name, v1.GetOptions{}) if err != nil { - return 0, err + return 0, cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: target.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s}", target.Name, target.Namespace), Reason: err.Error()} } restartCount := 0 for _, container := range pod.Status.ContainerStatuses { @@ -224,7 +231,7 @@ func verifyRestartCount(t targetDetails, timeout, delay int, clients clients.Cli Try(func(attempt uint) error { pod, err := clients.KubeClient.CoreV1().Pods(t.Namespace).Get(context.Background(), t.Name, v1.GetOptions{}) if err != nil { - return errors.Errorf("Unable to find the pod with name %v, err: %v", t.Name, err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s}", t.Name, t.Namespace), Reason: err.Error()} } for _, container := range pod.Status.ContainerStatuses { if container.Name == t.TargetContainer { @@ -233,7 +240,7 @@ func verifyRestartCount(t targetDetails, timeout, delay int, clients clients.Cli } } if restartCountAfter <= restartCountBefore { - return errors.Errorf("Target container is not restarted") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: "target container is not restarted after kill"} } log.Infof("restartCount of target container after chaos injection: %v", strconv.Itoa(restartCountAfter)) return nil @@ -262,4 +269,5 @@ type targetDetails struct { Namespace string TargetContainer string RestartCountBefore int + Source string } diff --git a/chaoslib/litmus/container-kill/lib/container-kill.go b/chaoslib/litmus/container-kill/lib/container-kill.go index 267871545..45da87235 100644 --- a/chaoslib/litmus/container-kill/lib/container-kill.go +++ b/chaoslib/litmus/container-kill/lib/container-kill.go @@ -3,6 +3,8 @@ package 
lib import ( "context" "fmt" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/palantir/stacktrace" "strconv" "strings" @@ -13,20 +15,19 @@ import ( "github.com/litmuschaos/litmus-go/pkg/status" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" - "github.com/pkg/errors" "github.com/sirupsen/logrus" apiv1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -//PrepareContainerKill contains the prepration steps before chaos injection +//PrepareContainerKill contains the preparation steps before chaos injection func PrepareContainerKill(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { var err error // Get the target pod details for the chaos execution // if the target pod is not defined it will derive the random target pod list using pod affected percentage if experimentsDetails.TargetPods == "" && chaosDetails.AppDetail == nil { - return errors.Errorf("please provide one of the appLabel or TARGET_PODS") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "provide one of the appLabel or TARGET_PODS"} } //Setup the tunables if provided in range SetChaosTunables(experimentsDetails) @@ -38,7 +39,7 @@ func PrepareContainerKill(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList, err := common.GetTargetPods(experimentsDetails.NodeLabel, experimentsDetails.TargetPods, experimentsDetails.PodsAffectedPerc, clients, chaosDetails) if err != nil { - return err + return stacktrace.Propagate(err, "could not get target pods") } //Waiting for the ramp time before chaos injection @@ -51,28 +52,28 @@ func PrepareContainerKill(experimentsDetails *experimentTypes.ExperimentDetails, if experimentsDetails.ChaosServiceAccount == "" { experimentsDetails.ChaosServiceAccount, err = common.GetServiceAccount(experimentsDetails.ChaosNamespace, experimentsDetails.ChaosPodName, clients) if err != nil { - return errors.Errorf("unable to get the serviceAccountName, err: %v", err) + return stacktrace.Propagate(err, "could not get experiment service account") } } if experimentsDetails.EngineName != "" { if err := common.SetHelperData(chaosDetails, experimentsDetails.SetHelperData, clients); err != nil { - return err + return stacktrace.Propagate(err, "could not set helper data") } } - experimentsDetails.IsTargetContainerProvided = (experimentsDetails.TargetContainer != "") + experimentsDetails.IsTargetContainerProvided = experimentsDetails.TargetContainer != "" switch strings.ToLower(experimentsDetails.Sequence) { case "serial": if err = injectChaosInSerialMode(experimentsDetails, targetPodList, clients, chaosDetails, resultDetails, eventsDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": if err = injectChaosInParallelMode(experimentsDetails, targetPodList, clients, chaosDetails, resultDetails, eventsDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.Sequence) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)} } //Waiting for the ramp time after chaos injection @@ -85,7 +86,6 @@ func PrepareContainerKill(experimentsDetails *experimentTypes.ExperimentDetails, //
injectChaosInSerialMode kill the container of all target application serially (one by one) func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { - var err error // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { @@ -98,16 +98,13 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai //Get the target container name of the application pod if !experimentsDetails.IsTargetContainerProvided { - experimentsDetails.TargetContainer, err = common.GetTargetContainer(pod.Namespace, pod.Name, clients) - if err != nil { - return errors.Errorf("unable to get the target container name, err: %v", err) - } + experimentsDetails.TargetContainer = pod.Spec.Containers[0].Name } runID := common.GetRunID() if err := createHelperPod(experimentsDetails, clients, chaosDetails, fmt.Sprintf("%s:%s:%s", pod.Name, pod.Namespace, experimentsDetails.TargetContainer), pod.Spec.NodeName, runID); err != nil { - return errors.Errorf("unable to create the helper pod, err: %v", err) + return stacktrace.Propagate(err, "could not create helper pod") } appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID) @@ -116,7 +113,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai log.Info("[Status]: Checking the status of the helper pods") if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return errors.Errorf("helper pods are not in running state, err: %v", err) + return stacktrace.Propagate(err, "could not check helper status") } // Wait till the completion of the helper pod @@ -125,13 +122,13 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, experimentsDetails.ExperimentName) if err != nil || podStatus == "Failed" { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return common.HelperFailedError(err) + return common.HelperFailedError(err, appLabel, experimentsDetails.ChaosNamespace, true) } //Deleting all the helper pod for container-kill chaos log.Info("[Cleanup]: Deleting all the helper pods") if err = common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return errors.Errorf("unable to delete the helper pod, err: %v", err) + return stacktrace.Propagate(err, "could not delete helper pod(s)") } } return nil @@ -156,7 +153,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet } if err := createHelperPod(experimentsDetails, clients, chaosDetails, strings.Join(targetsPerNode, ";"), node, runID); err != nil { - return errors.Errorf("unable to create the helper pod, err: %v", err) + return stacktrace.Propagate(err, "could not create helper pod") } } @@ -166,7 +163,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet log.Info("[Status]: Checking the status of the helper 
pods") if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return errors.Errorf("helper pods are not in running state, err: %v", err) + return stacktrace.Propagate(err, "could not check helper status") } // Wait till the completion of the helper pod @@ -175,13 +172,13 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, experimentsDetails.ExperimentName) if err != nil || podStatus == "Failed" { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return common.HelperFailedError(err) + return common.HelperFailedError(err, appLabel, experimentsDetails.ChaosNamespace, true) } //Deleting all the helper pod for container-kill chaos log.Info("[Cleanup]: Deleting all the helper pods") if err = common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return errors.Errorf("unable to delete the helper pod, err: %v", err) + return stacktrace.Propagate(err, "could not delete helper pod(s)") } return nil @@ -248,7 +245,10 @@ func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clie } _, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Create(context.Background(), helperPod, v1.CreateOptions{}) - return err + if err != nil { + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("unable to create helper pod: %s", err.Error())} + } + return nil } // getPodEnv derive all the env required for the helper pod diff --git a/chaoslib/litmus/disk-fill/helper/disk-fill.go b/chaoslib/litmus/disk-fill/helper/disk-fill.go index b20c31fdb..474f93f87 100644 --- a/chaoslib/litmus/disk-fill/helper/disk-fill.go +++ b/chaoslib/litmus/disk-fill/helper/disk-fill.go @@ -3,6 +3,8 @@ package helper import ( "context" "fmt" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/palantir/stacktrace" "os" "os/exec" "os/signal" @@ -18,7 +20,6 @@ import ( "github.com/litmuschaos/litmus-go/pkg/result" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/api/resource" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -30,8 +31,6 @@ var inject, abort chan os.Signal // Helper injects the disk-fill chaos func Helper(clients clients.ClientSets) { - var err error - experimentsDetails := experimentTypes.ExperimentDetails{} eventsDetails := types.EventDetails{} chaosDetails := types.ChaosDetails{} @@ -53,6 +52,7 @@ func Helper(clients clients.ClientSets) { // Intialise the chaos attributes types.InitialiseChaosVariables(&chaosDetails) + chaosDetails.Phase = types.ChaosInjectPhase // Intialise Chaos Result Parameters types.SetResultAttributes(&resultDetails, chaosDetails) @@ -60,7 +60,11 @@ func Helper(clients clients.ClientSets) { // Set the chaos result uid result.SetResultUID(&resultDetails, clients, &chaosDetails) - if err = diskFill(&experimentsDetails, clients, &eventsDetails, &chaosDetails, &resultDetails); err != nil { + if err := diskFill(&experimentsDetails, clients, &eventsDetails, &chaosDetails, &resultDetails); err != nil { + // update failstep inside 
chaosresult + if resultErr := result.UpdateFailedStepFromHelper(&resultDetails, &chaosDetails, clients, err); resultErr != nil { + log.Fatalf("helper pod failed, err: %v, resultErr: %v", err, resultErr) + } log.Fatalf("helper pod failed, err: %v", err) } } @@ -68,9 +72,9 @@ func Helper(clients clients.ClientSets) { // diskFill contains steps to inject disk-fill chaos func diskFill(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails) error { - targetList, err := common.ParseTargets() + targetList, err := common.ParseTargets(chaosDetails.ChaosPodName) if err != nil { - return err + return stacktrace.Propagate(err, "could not parse targets") } var targets []targetDetails @@ -80,23 +84,24 @@ func diskFill(experimentsDetails *experimentTypes.ExperimentDetails, clients cli Name: t.Name, Namespace: t.Namespace, TargetContainer: t.TargetContainer, + Source: chaosDetails.ChaosPodName, } // Derive the container id of the target container - td.ContainerId, err = common.GetContainerID(td.Namespace, td.Name, td.TargetContainer, clients) + td.ContainerId, err = common.GetContainerID(td.Namespace, td.Name, td.TargetContainer, clients, chaosDetails.ChaosPodName) if err != nil { - return err + return stacktrace.Propagate(err, "could not get container id") } // extract out the pid of the target container - td.TargetPID, err = common.GetPID(experimentsDetails.ContainerRuntime, td.ContainerId, experimentsDetails.SocketPath) + td.TargetPID, err = common.GetPID(experimentsDetails.ContainerRuntime, td.ContainerId, experimentsDetails.SocketPath, td.Source) if err != nil { return err } td.SizeToFill, err = getDiskSizeToFill(td, experimentsDetails, clients) if err != nil { - return err + return stacktrace.Propagate(err, "could not get disk size to fill") } log.InfoWithValues("[Info]: Details of application under chaos injection", logrus.Fields{ @@ -125,17 +130,18 @@ func diskFill(experimentsDetails *experimentTypes.ExperimentDetails, clients cli os.Exit(1) default: } + for _, t := range targets { if t.SizeToFill > 0 { - if err = fillDisk(t.TargetPID, t.SizeToFill, experimentsDetails.DataBlockSize); err != nil { - return err + if err := fillDisk(t, experimentsDetails.DataBlockSize); err != nil { + return stacktrace.Propagate(err, "could not fill ephemeral storage") } log.Infof("successfully injected chaos on target: {name: %s, namespace: %v, container: %v}", t.Name, t.Namespace, t.TargetContainer) if err = result.AnnotateChaosResult(resultDetails.Name, chaosDetails.ChaosNamespace, "injected", "pod", t.Name); err != nil { if revertErr := revertDiskFill(t, clients); revertErr != nil { - return revertErr + return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s,%s]", stacktrace.RootCause(err).Error(), stacktrace.RootCause(revertErr).Error())} } - return err + return stacktrace.Propagate(err, "could not annotate chaosresult") } } else { log.Warn("No required free space found!") @@ -153,8 +159,7 @@ func diskFill(experimentsDetails *experimentTypes.ExperimentDetails, clients cli for _, t := range targets { // It will delete the target pod if target pod is evicted // if target pod is still running then it will delete all the files, which was created earlier during chaos execution - err = revertDiskFill(t, clients) - if err != nil { + if err = revertDiskFill(t, clients); err != nil { errList = append(errList, err.Error()) continue } @@ -164,22 +169,24 @@ func diskFill(experimentsDetails 
*experimentTypes.ExperimentDetails, clients cli } if len(errList) != 0 { - return fmt.Errorf("failed to revert chaos, err: %v", strings.Join(errList, ",")) + return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s]", strings.Join(errList, ","))} } - return nil } // fillDisk fill the ephemeral disk by creating files -func fillDisk(targetPID int, sizeTobeFilled, bs int) error { +func fillDisk(t targetDetails, bs int) error { // Creating files to fill the required ephemeral storage size of block size of 4K - log.Infof("[Fill]: Filling ephemeral storage, size: %vKB", sizeTobeFilled) - dd := fmt.Sprintf("sudo dd if=/dev/urandom of=/proc/%v/root/home/diskfill bs=%vK count=%v", targetPID, bs, strconv.Itoa(sizeTobeFilled/bs)) + log.Infof("[Fill]: Filling ephemeral storage, size: %vKB", t.SizeToFill) + dd := fmt.Sprintf("sudo dd if=/dev/urandom of=/proc/%v/root/home/diskfill bs=%vK count=%v", t.TargetPID, bs, strconv.Itoa(t.SizeToFill/bs)) log.Infof("dd: {%v}", dd) cmd := exec.Command("/bin/bash", "-c", dd) - _, err := cmd.CombinedOutput() - return err + out, err := cmd.CombinedOutput() + if err != nil { + log.Error(err.Error()) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: string(out)} + } + return nil } // getEphemeralStorageAttributes derive the ephemeral storage attributes from the target pod @@ -187,7 +194,7 @@ func getEphemeralStorageAttributes(t targetDetails, clients clients.ClientSets) pod, err := clients.KubeClient.CoreV1().Pods(t.Namespace).Get(context.Background(), t.Name, v1.GetOptions{}) if err != nil { - return 0, err + return 0, cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s}", t.Name, t.Namespace), Reason: err.Error()} } var ephemeralStorageLimit int64 @@ -213,7 +220,7 @@ func filterUsedEphemeralStorage(ephemeralStorageDetails string) (int, error) { ephemeralStorageAll := strings.Split(ephemeralStorageDetails, "\n") // It will return the details of main directory ephemeralStorageAllDiskFill := strings.Split(ephemeralStorageAll[len(ephemeralStorageAll)-2], "\t")[0] - // type casting string to interger + // type casting string to integer ephemeralStorageSize, err := strconv.Atoi(ephemeralStorageAllDiskFill) return ephemeralStorageSize, err } @@ -241,14 +248,14 @@ func getSizeToBeFilled(experimentsDetails *experimentTypes.ExperimentDetails, us func revertDiskFill(t targetDetails, clients clients.ClientSets) error { pod, err := clients.KubeClient.CoreV1().Pods(t.Namespace).Get(context.Background(), t.Name, v1.GetOptions{}) if err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Source: t.Source, Target: fmt.Sprintf("{podName: %s,namespace: %s}", t.Name, t.Namespace), Reason: err.Error()} } podReason := pod.Status.Reason if podReason == "Evicted" { // Deleting the pod as pod is already evicted log.Warn("Target pod is evicted, deleting the pod") if err := clients.KubeClient.CoreV1().Pods(t.Namespace).Delete(context.Background(), t.Name, v1.DeleteOptions{}); err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Source: t.Source, Target: fmt.Sprintf("{podName: %s,namespace: %s}", t.Name, t.Namespace), Reason: fmt.Sprintf("failed to delete target pod after eviction: %s", err.Error())} } } else { // deleting the files after chaos execution @@ -256,8 +263,8 @@ func revertDiskFill(t targetDetails, clients
clients.ClientSets) error { cmd := exec.Command("/bin/bash", "-c", rm) out, err := cmd.CombinedOutput() if err != nil { - log.Error(string(out)) - return err + log.Error(err.Error()) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Source: t.Source, Target: fmt.Sprintf("{podName: %s,namespace: %s}", t.Name, t.Namespace), Reason: fmt.Sprintf("failed to cleanup ephemeral storage: %s", string(out))} } } log.Infof("successfully reverted chaos on target: {name: %s, namespace: %v, container: %v}", t.Name, t.Namespace, t.TargetContainer) @@ -308,19 +315,20 @@ func abortWatcher(targets []targetDetails, experimentsDetails *experimentTypes.E } func getDiskSizeToFill(t targetDetails, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets) (int, error) { - usedEphemeralStorageSize, err := getUsedEphemeralStorage(t.TargetPID) + + usedEphemeralStorageSize, err := getUsedEphemeralStorage(t) if err != nil { - return 0, err + return 0, stacktrace.Propagate(err, "could not get used ephemeral storage") } // GetEphemeralStorageAttributes derive the ephemeral storage attributes from the target container ephemeralStorageLimit, err := getEphemeralStorageAttributes(t, clients) if err != nil { - return 0, err + return 0, stacktrace.Propagate(err, "could not get ephemeral storage attributes") } if ephemeralStorageLimit == 0 && experimentsDetails.EphemeralStorageMebibytes == "0" { - return 0, errors.Errorf("either provide ephemeral storage limit inside target container or define EPHEMERAL_STORAGE_MEBIBYTES ENV") + return 0, cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s}", t.Name, t.Namespace), Reason: "either provide ephemeral storage limit inside target container or define EPHEMERAL_STORAGE_MEBIBYTES ENV"} } // deriving the ephemeral storage size to be filled @@ -329,21 +337,21 @@ func getDiskSizeToFill(t targetDetails, experimentsDetails *experimentTypes.Expe return sizeTobeFilled, nil } -func getUsedEphemeralStorage(targetPID int) (int, error) { +func getUsedEphemeralStorage(t targetDetails) (int, error) { // derive the used ephemeral storage size from the target container - du := fmt.Sprintf("sudo du /proc/%v/root", targetPID) + du := fmt.Sprintf("sudo du /proc/%v/root", t.TargetPID) cmd := exec.Command("/bin/bash", "-c", du) out, err := cmd.CombinedOutput() if err != nil { - log.Error(string(out)) - return 0, err + log.Error(err.Error()) + return 0, cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: fmt.Sprintf("failed to get used ephemeral storage size: %s", string(out))} } ephemeralStorageDetails := string(out) // filtering out the used ephemeral storage from the output of du command usedEphemeralStorageSize, err := filterUsedEphemeralStorage(ephemeralStorageDetails) if err != nil { - return 0, errors.Errorf("unable to filter used ephemeral storage size, err: %v", err) + return 0, cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: fmt.Sprintf("failed to get used ephemeral storage size: %s", err.Error())} } log.Infof("used ephemeral storage space: %vKB", strconv.Itoa(usedEphemeralStorageSize)) return usedEphemeralStorageSize, nil @@ -356,4 +364,5 @@ type targetDetails struct { ContainerId string SizeToFill int TargetPID int + Source 
string } diff --git a/chaoslib/litmus/disk-fill/lib/disk-fill.go b/chaoslib/litmus/disk-fill/lib/disk-fill.go index 1bc182ccc..62fc79aeb 100644 --- a/chaoslib/litmus/disk-fill/lib/disk-fill.go +++ b/chaoslib/litmus/disk-fill/lib/disk-fill.go @@ -3,6 +3,8 @@ package lib import ( "context" "fmt" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/palantir/stacktrace" "strconv" "strings" @@ -14,24 +16,23 @@ import ( "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" "github.com/litmuschaos/litmus-go/pkg/utils/exec" - "github.com/pkg/errors" "github.com/sirupsen/logrus" apiv1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PrepareDiskFill contains the prepration steps before chaos injection +//PrepareDiskFill contains the preparation steps before chaos injection func PrepareDiskFill(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { var err error - // It will contains all the pod & container details required for exec command + // It will contain all the pod & container details required for exec command execCommandDetails := exec.PodDetails{} // Get the target pod details for the chaos execution // if the target pod is not defined it will derive the random target pod list using pod affected percentage if experimentsDetails.TargetPods == "" && chaosDetails.AppDetail == nil { - return errors.Errorf("please provide one of the appLabel or TARGET_PODS") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "provide one of the appLabel or TARGET_PODS"} } - //setup the tunables if provided in range + //set up the tunables if provided in range setChaosTunables(experimentsDetails) log.InfoWithValues("[Info]: The chaos tunables are:", logrus.Fields{ @@ -43,7 +44,7 @@ func PrepareDiskFill(experimentsDetails *experimentTypes.ExperimentDetails, clie targetPodList, err := common.GetTargetPods(experimentsDetails.NodeLabel, experimentsDetails.TargetPods, experimentsDetails.PodsAffectedPerc, clients, chaosDetails) if err != nil { - return err + return stacktrace.Propagate(err, "could not get target pods") } //Waiting for the ramp time before chaos injection @@ -56,28 +57,28 @@ func PrepareDiskFill(experimentsDetails *experimentTypes.ExperimentDetails, clie if experimentsDetails.ChaosServiceAccount == "" { experimentsDetails.ChaosServiceAccount, err = common.GetServiceAccount(experimentsDetails.ChaosNamespace, experimentsDetails.ChaosPodName, clients) if err != nil { - return errors.Errorf("unable to get the serviceAccountName, err: %v", err) + return stacktrace.Propagate(err, "could not get experiment service account") } } if experimentsDetails.EngineName != "" { if err := common.SetHelperData(chaosDetails, experimentsDetails.SetHelperData, clients); err != nil { - return err + return stacktrace.Propagate(err, "could not set helper data") } } - experimentsDetails.IsTargetContainerProvided = (experimentsDetails.TargetContainer != "") + experimentsDetails.IsTargetContainerProvided = experimentsDetails.TargetContainer != "" switch strings.ToLower(experimentsDetails.Sequence) { case "serial": if err = injectChaosInSerialMode(experimentsDetails, targetPodList, clients, chaosDetails, execCommandDetails, resultDetails, eventsDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": if err =
injectChaosInParallelMode(experimentsDetails, targetPodList, clients, chaosDetails, execCommandDetails, resultDetails, eventsDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.Sequence) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)} } //Waiting for the ramp time after chaos injection @@ -91,7 +92,6 @@ func PrepareDiskFill(experimentsDetails *experimentTypes.ExperimentDetails, clie // injectChaosInSerialMode fill the ephemeral storage of all target application serially (one by one) func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, execCommandDetails exec.PodDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { - var err error // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { @@ -104,15 +104,12 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai //Get the target container name of the application pod if !experimentsDetails.IsTargetContainerProvided { - experimentsDetails.TargetContainer, err = common.GetTargetContainer(pod.Namespace, pod.Name, clients) - if err != nil { - return errors.Errorf("unable to get the target container name, err: %v", err) - } + experimentsDetails.TargetContainer = pod.Spec.Containers[0].Name } runID := common.GetRunID() if err := createHelperPod(experimentsDetails, clients, chaosDetails, fmt.Sprintf("%s:%s:%s", pod.Name, pod.Namespace, experimentsDetails.TargetContainer), pod.Spec.NodeName, runID); err != nil { - return errors.Errorf("unable to create the helper pod, err: %v", err) + return stacktrace.Propagate(err, "could not create helper pod") } appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID) @@ -121,7 +118,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai log.Info("[Status]: Checking the status of the helper pods") if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return errors.Errorf("helper pods are not in running state, err: %v", err) + return stacktrace.Propagate(err, "could not check helper status") } // Wait till the completion of the helper pod @@ -130,13 +127,13 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, experimentsDetails.ExperimentName) if err != nil || podStatus == "Failed" { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return common.HelperFailedError(err) + return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, true) } //Deleting all the helper pod for disk-fill chaos log.Info("[Cleanup]: Deleting the helper pod") if err = common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return errors.Errorf("unable to 
delete the helper pod, %v", err) + return stacktrace.Propagate(err, "could not delete helper pod(s)") } } @@ -165,7 +162,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet } if err := createHelperPod(experimentsDetails, clients, chaosDetails, strings.Join(targetsPerNode, ";"), node, runID); err != nil { - return errors.Errorf("unable to create the helper pod, err: %v", err) + return stacktrace.Propagate(err, "could not create helper pod") } } @@ -175,7 +172,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet log.Info("[Status]: Checking the status of the helper pods") if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return errors.Errorf("helper pods are not in running state, err: %v", err) + return stacktrace.Propagate(err, "could not check helper status") } // Wait till the completion of the helper pod @@ -184,13 +181,13 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, experimentsDetails.ExperimentName) if err != nil || podStatus == "Failed" { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return common.HelperFailedError(err) + return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, true) } //Deleting all the helper pod for disk-fill chaos log.Info("[Cleanup]: Deleting all the helper pod") if err = common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return errors.Errorf("unable to delete the helper pod, %v", err) + return stacktrace.Propagate(err, "could not delete helper pod(s)") } return nil @@ -256,7 +253,10 @@ func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clie } _, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Create(context.Background(), helperPod, v1.CreateOptions{}) - return err + if err != nil { + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("unable to create helper pod: %s", err.Error())} + } + return nil } // getPodEnv derive all the env required for the helper pod diff --git a/chaoslib/litmus/docker-service-kill/lib/docker-service-kill.go b/chaoslib/litmus/docker-service-kill/lib/docker-service-kill.go index 282c82ad8..cb9c40bf4 100644 --- a/chaoslib/litmus/docker-service-kill/lib/docker-service-kill.go +++ b/chaoslib/litmus/docker-service-kill/lib/docker-service-kill.go @@ -5,6 +5,9 @@ import ( "fmt" "strconv" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/palantir/stacktrace" + clients "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/docker-service-kill/types" @@ -13,7 +16,6 @@ import ( "github.com/litmuschaos/litmus-go/pkg/status" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" - "github.com/pkg/errors" "github.com/sirupsen/logrus" apiv1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -27,7 +29,7 @@ func PrepareDockerServiceKill(experimentsDetails *experimentTypes.ExperimentDeta //Select node for docker-service-kill 
experimentsDetails.TargetNode, err = common.GetNodeName(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.NodeLabel, clients) if err != nil { - return err + return stacktrace.Propagate(err, "could not get node name") } } @@ -51,13 +53,13 @@ func PrepareDockerServiceKill(experimentsDetails *experimentTypes.ExperimentDeta if experimentsDetails.EngineName != "" { if err := common.SetHelperData(chaosDetails, experimentsDetails.SetHelperData, clients); err != nil { - return err + return stacktrace.Propagate(err, "could not set helper data") } } // Creating the helper pod to perform docker-service-kill if err = createHelperPod(experimentsDetails, clients, chaosDetails, experimentsDetails.TargetNode); err != nil { - return errors.Errorf("unable to create the helper pod, err: %v", err) + return stacktrace.Propagate(err, "could not create helper pod") } appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, experimentsDetails.RunID) @@ -66,7 +68,7 @@ func PrepareDockerServiceKill(experimentsDetails *experimentTypes.ExperimentDeta log.Info("[Status]: Checking the status of the helper pod") if err = status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { common.DeleteHelperPodBasedOnJobCleanupPolicy(experimentsDetails.ExperimentName+"-helper-"+experimentsDetails.RunID, appLabel, chaosDetails, clients) - return errors.Errorf("helper pod is not in running state, err: %v", err) + return stacktrace.Propagate(err, "could not check helper status") } // run the probes during chaos @@ -81,7 +83,7 @@ func PrepareDockerServiceKill(experimentsDetails *experimentTypes.ExperimentDeta log.Info("[Status]: Check for the node to be in NotReady state") if err = status.CheckNodeNotReadyState(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { common.DeleteHelperPodBasedOnJobCleanupPolicy(experimentsDetails.ExperimentName+"-helper-"+experimentsDetails.RunID, appLabel, chaosDetails, clients) - return errors.Errorf("application node is not in NotReady state, err: %v", err) + return stacktrace.Propagate(err, "could not check for NOT READY state") } // Wait till the completion of helper pod @@ -90,13 +92,13 @@ func PrepareDockerServiceKill(experimentsDetails *experimentTypes.ExperimentDeta podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, experimentsDetails.ExperimentName) if err != nil || podStatus == "Failed" { common.DeleteHelperPodBasedOnJobCleanupPolicy(experimentsDetails.ExperimentName+"-helper-"+experimentsDetails.RunID, appLabel, chaosDetails, clients) - return common.HelperFailedError(err) + return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, false) } //Deleting the helper pod log.Info("[Cleanup]: Deleting the helper pod") if err = common.DeletePod(experimentsDetails.ExperimentName+"-helper-"+experimentsDetails.RunID, appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return errors.Errorf("unable to delete the helper pod, err: %v", err) + return stacktrace.Propagate(err, "could not delete helper pod") } //Waiting for the ramp time after chaos injection @@ -190,7 +192,10 @@ func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clie } _, err := 
clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Create(context.Background(), helperPod, v1.CreateOptions{}) - return err + if err != nil { + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("unable to create helper pod: %s", err.Error())} + } + return nil } func ptrint64(p int64) *int64 { diff --git a/chaoslib/litmus/ebs-loss/lib/ebs-loss-by-id/lib/ebs-loss-by-id.go b/chaoslib/litmus/ebs-loss/lib/ebs-loss-by-id/lib/ebs-loss-by-id.go index a07c8aa5a..b21a24de4 100644 --- a/chaoslib/litmus/ebs-loss/lib/ebs-loss-by-id/lib/ebs-loss-by-id.go +++ b/chaoslib/litmus/ebs-loss/lib/ebs-loss-by-id/lib/ebs-loss-by-id.go @@ -1,18 +1,20 @@ package lib import ( + "fmt" "os" "os/signal" "strings" "syscall" ebsloss "github.com/litmuschaos/litmus-go/chaoslib/litmus/ebs-loss/lib" + "github.com/litmuschaos/litmus-go/pkg/cerrors" clients "github.com/litmuschaos/litmus-go/pkg/clients" experimentTypes "github.com/litmuschaos/litmus-go/pkg/kube-aws/ebs-loss/types" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" - "github.com/pkg/errors" + "github.com/palantir/stacktrace" ) var ( @@ -20,7 +22,7 @@ var ( inject, abort chan os.Signal ) -//PrepareEBSLossByID contains the prepration and injection steps for the experiment +// PrepareEBSLossByID contains the preparation and injection steps for the experiment func PrepareEBSLossByID(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { // inject channel is used to transmit signal notifications. @@ -48,7 +50,7 @@ func PrepareEBSLossByID(experimentsDetails *experimentTypes.ExperimentDetails, c //get the volume id or list of instance ids volumeIDList := strings.Split(experimentsDetails.EBSVolumeID, ",") if len(volumeIDList) == 0 { - return errors.Errorf("no volume id found to detach") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "no volume id found to detach"} } // watching for the abort signal and revert the chaos go ebsloss.AbortWatcher(experimentsDetails, volumeIDList, abort, chaosDetails) @@ -56,14 +58,14 @@ func PrepareEBSLossByID(experimentsDetails *experimentTypes.ExperimentDetails, c switch strings.ToLower(experimentsDetails.Sequence) { case "serial": if err = ebsloss.InjectChaosInSerialMode(experimentsDetails, volumeIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": if err = ebsloss.InjectChaosInParallelMode(experimentsDetails, volumeIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.Sequence) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)} } //Waiting for the ramp time after chaos injection diff --git a/chaoslib/litmus/ebs-loss/lib/ebs-loss-by-tag/lib/ebs-loss-by-tag.go b/chaoslib/litmus/ebs-loss/lib/ebs-loss-by-tag/lib/ebs-loss-by-tag.go index f2e63d892..0b2039c3e 100644 --- a/chaoslib/litmus/ebs-loss/lib/ebs-loss-by-tag/lib/ebs-loss-by-tag.go +++ b/chaoslib/litmus/ebs-loss/lib/ebs-loss-by-tag/lib/ebs-loss-by-tag.go @@ -1,18 +1,20 @@
package lib import ( + "fmt" "os" "os/signal" "strings" "syscall" ebsloss "github.com/litmuschaos/litmus-go/chaoslib/litmus/ebs-loss/lib" + "github.com/litmuschaos/litmus-go/pkg/cerrors" clients "github.com/litmuschaos/litmus-go/pkg/clients" experimentTypes "github.com/litmuschaos/litmus-go/pkg/kube-aws/ebs-loss/types" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" - "github.com/pkg/errors" + "github.com/palantir/stacktrace" ) var ( @@ -20,7 +22,7 @@ var ( inject, abort chan os.Signal ) -//PrepareEBSLossByTag contains the prepration and injection steps for the experiment +// PrepareEBSLossByTag contains the preparation and injection steps for the experiment func PrepareEBSLossByTag(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { // inject channel is used to transmit signal notifications. @@ -54,14 +56,14 @@ func PrepareEBSLossByTag(experimentsDetails *experimentTypes.ExperimentDetails, switch strings.ToLower(experimentsDetails.Sequence) { case "serial": if err = ebsloss.InjectChaosInSerialMode(experimentsDetails, targetEBSVolumeIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": if err = ebsloss.InjectChaosInParallelMode(experimentsDetails, targetEBSVolumeIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.Sequence) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)} } //Waiting for the ramp time after chaos injection if experimentsDetails.RampTime != 0 { diff --git a/chaoslib/litmus/ebs-loss/lib/ebs-loss.go b/chaoslib/litmus/ebs-loss/lib/ebs-loss.go index 6baf3a3b1..8fd39c06b 100644 --- a/chaoslib/litmus/ebs-loss/lib/ebs-loss.go +++ b/chaoslib/litmus/ebs-loss/lib/ebs-loss.go @@ -1,9 +1,11 @@ package lib import ( + "fmt" "os" "time" + "github.com/litmuschaos/litmus-go/pkg/cerrors" clients "github.com/litmuschaos/litmus-go/pkg/clients" ebs "github.com/litmuschaos/litmus-go/pkg/cloud/aws/ebs" "github.com/litmuschaos/litmus-go/pkg/events" @@ -12,10 +14,10 @@ import ( "github.com/litmuschaos/litmus-go/pkg/probe" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" - "github.com/pkg/errors" + "github.com/palantir/stacktrace" ) -//InjectChaosInSerialMode will inject the ebs loss chaos in serial mode which means one after other +// InjectChaosInSerialMode will inject the ebs loss chaos in serial mode which means one after other func InjectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetEBSVolumeIDList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { //ChaosStartTimeStamp contains the start timestamp, when the chaos injection begin @@ -34,13 +36,13 @@ func InjectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai //Get volume attachment details ec2InstanceID, device, err := ebs.GetVolumeAttachmentDetails(volumeID, experimentsDetails.VolumeTag,
experimentsDetails.Region) if err != nil { - return errors.Errorf("fail to get the attachment info, err: %v", err) + return stacktrace.Propagate(err, "failed to get the attachment info") } //Detaching the ebs volume from the instance log.Info("[Chaos]: Detaching the EBS volume from the instance") if err = ebs.EBSVolumeDetach(volumeID, experimentsDetails.Region); err != nil { - return errors.Errorf("ebs detachment failed, err: %v", err) + return stacktrace.Propagate(err, "ebs detachment failed") } common.SetTargets(volumeID, "injected", "EBS", chaosDetails) @@ -48,14 +50,14 @@ func InjectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai //Wait for ebs volume detachment log.Infof("[Wait]: Wait for EBS volume detachment for volume %v", volumeID) if err = ebs.WaitForVolumeDetachment(volumeID, ec2InstanceID, experimentsDetails.Region, experimentsDetails.Delay, experimentsDetails.Timeout); err != nil { - return errors.Errorf("unable to detach the ebs volume to the ec2 instance, err: %v", err) + return stacktrace.Propagate(err, "ebs detachment failed") } // run the probes during chaos // the OnChaos probes execution will start in the first iteration and keep running for the entire chaos duration if len(resultDetails.ProbeDetails) != 0 && i == 0 { if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { - return err + return stacktrace.Propagate(err, "failed to run probes") } } @@ -66,7 +68,7 @@ func InjectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai //Getting the EBS volume attachment status ebsState, err := ebs.GetEBSStatus(volumeID, ec2InstanceID, experimentsDetails.Region) if err != nil { - return errors.Errorf("failed to get the ebs status, err: %v", err) + return stacktrace.Propagate(err, "failed to get the ebs status") } switch ebsState { @@ -76,13 +78,13 @@ func InjectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai //Attaching the ebs volume from the instance log.Info("[Chaos]: Attaching the EBS volume back to the instance") if err = ebs.EBSVolumeAttach(volumeID, ec2InstanceID, device, experimentsDetails.Region); err != nil { - return errors.Errorf("ebs attachment failed, err: %v", err) + return stacktrace.Propagate(err, "ebs attachment failed") } //Wait for ebs volume attachment log.Infof("[Wait]: Wait for EBS volume attachment for %v volume", volumeID) if err = ebs.WaitForVolumeAttachment(volumeID, ec2InstanceID, experimentsDetails.Region, experimentsDetails.Delay, experimentsDetails.Timeout); err != nil { - return errors.Errorf("unable to attach the ebs volume to the ec2 instance, err: %v", err) + return stacktrace.Propagate(err, "ebs attachment failed") } } common.SetTargets(volumeID, "reverted", "EBS", chaosDetails) @@ -92,7 +94,7 @@ func InjectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai return nil } -//InjectChaosInParallelMode will inject the chaos in parallel mode that means all at once +// InjectChaosInParallelMode will inject the chaos in parallel mode that means all at once func InjectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetEBSVolumeIDList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { var ec2InstanceIDList, deviceList []string @@ -112,8 +114,15 @@ func InjectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet //prepare the instaceIDs and device name for all the given volume for 
_, volumeID := range targetEBSVolumeIDList { ec2InstanceID, device, err := ebs.GetVolumeAttachmentDetails(volumeID, experimentsDetails.VolumeTag, experimentsDetails.Region) - if err != nil || ec2InstanceID == "" || device == "" { - return errors.Errorf("fail to get the attachment info, err: %v", err) + if err != nil { + return stacktrace.Propagate(err, "failed to get the attachment info") + } + if ec2InstanceID == "" || device == "" { + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosInject, + Reason: "Volume not attached to any instance", + Target: fmt.Sprintf("EBS Volume ID: %v", volumeID), + } } ec2InstanceIDList = append(ec2InstanceIDList, ec2InstanceID) deviceList = append(deviceList, device) @@ -123,28 +132,28 @@ func InjectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet //Detaching the ebs volume from the instance log.Info("[Chaos]: Detaching the EBS volume from the instance") if err := ebs.EBSVolumeDetach(volumeID, experimentsDetails.Region); err != nil { - return errors.Errorf("ebs detachment failed, err: %v", err) + return stacktrace.Propagate(err, "ebs detachment failed") } common.SetTargets(volumeID, "injected", "EBS", chaosDetails) } log.Info("[Info]: Checking if the detachment process initiated") if err := ebs.CheckEBSDetachmentInitialisation(targetEBSVolumeIDList, ec2InstanceIDList, experimentsDetails.Region); err != nil { - return errors.Errorf("fail to initialise the detachment") + return stacktrace.Propagate(err, "failed to initialise the detachment") } for i, volumeID := range targetEBSVolumeIDList { //Wait for ebs volume detachment log.Infof("[Wait]: Wait for EBS volume detachment for volume %v", volumeID) if err := ebs.WaitForVolumeDetachment(volumeID, ec2InstanceIDList[i], experimentsDetails.Region, experimentsDetails.Delay, experimentsDetails.Timeout); err != nil { - return errors.Errorf("unable to detach the ebs volume to the ec2 instance, err: %v", err) + return stacktrace.Propagate(err, "ebs detachment failed") } } // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { - return err + return stacktrace.Propagate(err, "failed to run probes") } } @@ -157,7 +166,7 @@ func InjectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet //Getting the EBS volume attachment status ebsState, err := ebs.GetEBSStatus(volumeID, ec2InstanceIDList[i], experimentsDetails.Region) if err != nil { - return errors.Errorf("failed to get the ebs status, err: %v", err) + return stacktrace.Propagate(err, "failed to get the ebs status") } switch ebsState { @@ -167,13 +176,13 @@ func InjectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet //Attaching the ebs volume from the instance log.Info("[Chaos]: Attaching the EBS volume from the instance") if err = ebs.EBSVolumeAttach(volumeID, ec2InstanceIDList[i], deviceList[i], experimentsDetails.Region); err != nil { - return errors.Errorf("ebs attachment failed, err: %v", err) + return stacktrace.Propagate(err, "ebs attachment failed") } //Wait for ebs volume attachment log.Infof("[Wait]: Wait for EBS volume attachment for volume %v", volumeID) if err = ebs.WaitForVolumeAttachment(volumeID, ec2InstanceIDList[i], experimentsDetails.Region, experimentsDetails.Delay, experimentsDetails.Timeout); err != nil { - return errors.Errorf("unable to attach the ebs volume to the ec2 instance, err: %v", err) + return stacktrace.Propagate(err, "ebs attachment failed") } } 
common.SetTargets(volumeID, "reverted", "EBS", chaosDetails) @@ -193,13 +202,13 @@ func AbortWatcher(experimentsDetails *experimentTypes.ExperimentDetails, volumeI //Get volume attachment details instanceID, deviceName, err := ebs.GetVolumeAttachmentDetails(volumeID, experimentsDetails.VolumeTag, experimentsDetails.Region) if err != nil { - log.Errorf("fail to get the attachment info, err: %v", err) + log.Errorf("Failed to get the attachment info: %v", err) } //Getting the EBS volume attachment status ebsState, err := ebs.GetEBSStatus(experimentsDetails.EBSVolumeID, instanceID, experimentsDetails.Region) if err != nil { - log.Errorf("failed to get the ebs status when an abort signal is received, err: %v", err) + log.Errorf("Failed to get the ebs status when an abort signal is received: %v", err) } if ebsState != "attached" { @@ -207,13 +216,13 @@ func AbortWatcher(experimentsDetails *experimentTypes.ExperimentDetails, volumeI //We first wait for the volume to get in detached state then we are attaching it. log.Info("[Abort]: Wait for EBS complete volume detachment") if err = ebs.WaitForVolumeDetachment(experimentsDetails.EBSVolumeID, instanceID, experimentsDetails.Region, experimentsDetails.Delay, experimentsDetails.Timeout); err != nil { - log.Errorf("unable to detach the ebs volume, err: %v", err) + log.Errorf("Unable to detach the ebs volume: %v", err) } //Attaching the ebs volume from the instance log.Info("[Chaos]: Attaching the EBS volume from the instance") err = ebs.EBSVolumeAttach(experimentsDetails.EBSVolumeID, instanceID, deviceName, experimentsDetails.Region) if err != nil { - log.Errorf("ebs attachment failed when an abort signal is received, err: %v", err) + log.Errorf("EBS attachment failed when an abort signal is received: %v", err) } } common.SetTargets(volumeID, "reverted", "EBS", chaosDetails) diff --git a/chaoslib/litmus/ec2-terminate-by-id/lib/ec2-terminate-by-id.go b/chaoslib/litmus/ec2-terminate-by-id/lib/ec2-terminate-by-id.go index 62c76a2e3..2472b9467 100644 --- a/chaoslib/litmus/ec2-terminate-by-id/lib/ec2-terminate-by-id.go +++ b/chaoslib/litmus/ec2-terminate-by-id/lib/ec2-terminate-by-id.go @@ -1,12 +1,14 @@ package lib import ( + "fmt" "os" "os/signal" "strings" "syscall" "time" + "github.com/litmuschaos/litmus-go/pkg/cerrors" clients "github.com/litmuschaos/litmus-go/pkg/clients" awslib "github.com/litmuschaos/litmus-go/pkg/cloud/aws/ec2" "github.com/litmuschaos/litmus-go/pkg/events" @@ -15,7 +17,7 @@ import ( "github.com/litmuschaos/litmus-go/pkg/probe" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" - "github.com/pkg/errors" + "github.com/palantir/stacktrace" ) var ( @@ -23,7 +25,7 @@ var ( inject, abort chan os.Signal ) -//PrepareEC2TerminateByID contains the prepration and injection steps for the experiment +// PrepareEC2TerminateByID contains the prepration and injection steps for the experiment func PrepareEC2TerminateByID(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { // inject channel is used to transmit signal notifications. 
@@ -44,8 +46,8 @@ func PrepareEC2TerminateByID(experimentsDetails *experimentTypes.ExperimentDetai //get the instance id or list of instance ids instanceIDList := strings.Split(experimentsDetails.Ec2InstanceID, ",") - if len(instanceIDList) == 0 { - return errors.Errorf("no instance id found to terminate") + if experimentsDetails.Ec2InstanceID == "" || len(instanceIDList) == 0 { + return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "no EC2 instance ID found to terminate"} } // watching for the abort signal and revert the chaos @@ -54,14 +56,14 @@ func PrepareEC2TerminateByID(experimentsDetails *experimentTypes.ExperimentDetai switch strings.ToLower(experimentsDetails.Sequence) { case "serial": if err = injectChaosInSerialMode(experimentsDetails, instanceIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": if err = injectChaosInParallelMode(experimentsDetails, instanceIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.Sequence) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)} } //Waiting for the ramp time after chaos injection @@ -72,7 +74,7 @@ func PrepareEC2TerminateByID(experimentsDetails *experimentTypes.ExperimentDetai return nil } -//injectChaosInSerialMode will inject the ec2 instance termination in serial mode that is one after other +// injectChaosInSerialMode will inject the ec2 instance termination in serial mode that is one after other func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, instanceIDList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { select { @@ -100,7 +102,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai //Stopping the EC2 instance log.Info("[Chaos]: Stopping the desired EC2 instance") if err := awslib.EC2Stop(id, experimentsDetails.Region); err != nil { - return errors.Errorf("ec2 instance failed to stop, err: %v", err) + return stacktrace.Propagate(err, "ec2 instance failed to stop") } common.SetTargets(id, "injected", "EC2", chaosDetails) @@ -108,14 +110,14 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai //Wait for ec2 instance to completely stop log.Infof("[Wait]: Wait for EC2 instance '%v' to get in stopped state", id) if err := awslib.WaitForEC2Down(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.ManagedNodegroup, experimentsDetails.Region, id); err != nil { - return errors.Errorf("unable to stop the ec2 instance, err: %v", err) + return stacktrace.Propagate(err, "ec2 instance failed to stop") } // run the probes during chaos // the OnChaos probes execution will start in the first iteration and keep running for the entire chaos duration if len(resultDetails.ProbeDetails) != 0 && i == 0 { if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { - return err + return stacktrace.Propagate(err, "failed to run probes") } } @@ -127,13 +129,13 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai if 
experimentsDetails.ManagedNodegroup != "enable" { log.Info("[Chaos]: Starting back the EC2 instance") if err := awslib.EC2Start(id, experimentsDetails.Region); err != nil { - return errors.Errorf("ec2 instance failed to start, err: %v", err) + return stacktrace.Propagate(err, "ec2 instance failed to start") } //Wait for ec2 instance to get in running state log.Infof("[Wait]: Wait for EC2 instance '%v' to get in running state", id) if err := awslib.WaitForEC2Up(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.ManagedNodegroup, experimentsDetails.Region, id); err != nil { - return errors.Errorf("unable to start the ec2 instance, err: %v", err) + return stacktrace.Propagate(err, "ec2 instance failed to start") } } common.SetTargets(id, "reverted", "EC2", chaosDetails) @@ -171,7 +173,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet //Stopping the EC2 instance log.Info("[Chaos]: Stopping the desired EC2 instance") if err := awslib.EC2Stop(id, experimentsDetails.Region); err != nil { - return errors.Errorf("ec2 instance failed to stop, err: %v", err) + return stacktrace.Propagate(err, "ec2 instance failed to stop") } common.SetTargets(id, "injected", "EC2", chaosDetails) } @@ -180,7 +182,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet //Wait for ec2 instance to completely stop log.Infof("[Wait]: Wait for EC2 instance '%v' to get in stopped state", id) if err := awslib.WaitForEC2Down(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.ManagedNodegroup, experimentsDetails.Region, id); err != nil { - return errors.Errorf("unable to stop the ec2 instance, err: %v", err) + return stacktrace.Propagate(err, "ec2 instance failed to stop") } common.SetTargets(id, "reverted", "EC2 Instance ID", chaosDetails) } @@ -188,7 +190,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { - return err + return stacktrace.Propagate(err, "failed to run probes") } } @@ -202,7 +204,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet for _, id := range instanceIDList { log.Info("[Chaos]: Starting back the EC2 instance") if err := awslib.EC2Start(id, experimentsDetails.Region); err != nil { - return errors.Errorf("ec2 instance failed to start, err: %v", err) + return stacktrace.Propagate(err, "ec2 instance failed to start") } } @@ -210,7 +212,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet //Wait for ec2 instance to get in running state log.Infof("[Wait]: Wait for EC2 instance '%v' to get in running state", id) if err := awslib.WaitForEC2Up(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.ManagedNodegroup, experimentsDetails.Region, id); err != nil { - return errors.Errorf("unable to start the ec2 instance, err: %v", err) + return stacktrace.Propagate(err, "ec2 instance failed to start") } } } @@ -232,19 +234,19 @@ func abortWatcher(experimentsDetails *experimentTypes.ExperimentDetails, instanc for _, id := range instanceIDList { instanceState, err := awslib.GetEC2InstanceStatus(id, experimentsDetails.Region) if err != nil { - log.Errorf("fail to get instance status when an abort signal is received,err :%v", err) + log.Errorf("Failed to get instance status when an abort signal is received: %v", err) } 
if instanceState != "running" && experimentsDetails.ManagedNodegroup != "enable" { log.Info("[Abort]: Waiting for the EC2 instance to get down") if err := awslib.WaitForEC2Down(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.ManagedNodegroup, experimentsDetails.Region, id); err != nil { - log.Errorf("unable to wait till stop of the instance, err: %v", err) + log.Errorf("Unable to wait till stop of the instance: %v", err) } log.Info("[Abort]: Starting EC2 instance as abort signal received") err := awslib.EC2Start(id, experimentsDetails.Region) if err != nil { - log.Errorf("ec2 instance failed to start when an abort signal is received, err: %v", err) + log.Errorf("EC2 instance failed to start when an abort signal is received: %v", err) } } common.SetTargets(id, "reverted", "EC2", chaosDetails) diff --git a/chaoslib/litmus/ec2-terminate-by-tag/lib/ec2-terminate-by-tag.go b/chaoslib/litmus/ec2-terminate-by-tag/lib/ec2-terminate-by-tag.go index b078547cd..25e1b3a00 100644 --- a/chaoslib/litmus/ec2-terminate-by-tag/lib/ec2-terminate-by-tag.go +++ b/chaoslib/litmus/ec2-terminate-by-tag/lib/ec2-terminate-by-tag.go @@ -1,12 +1,14 @@ package lib import ( + "fmt" "os" "os/signal" "strings" "syscall" "time" + "github.com/litmuschaos/litmus-go/pkg/cerrors" clients "github.com/litmuschaos/litmus-go/pkg/clients" awslib "github.com/litmuschaos/litmus-go/pkg/cloud/aws/ec2" "github.com/litmuschaos/litmus-go/pkg/events" @@ -15,13 +17,13 @@ import ( "github.com/litmuschaos/litmus-go/pkg/probe" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" - "github.com/pkg/errors" + "github.com/palantir/stacktrace" "github.com/sirupsen/logrus" ) var inject, abort chan os.Signal -//PrepareEC2TerminateByTag contains the prepration and injection steps for the experiment +// PrepareEC2TerminateByTag contains the prepration and injection steps for the experiment func PrepareEC2TerminateByTag(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { // inject channel is used to transmit signal notifications. 
@@ -49,14 +51,14 @@ func PrepareEC2TerminateByTag(experimentsDetails *experimentTypes.ExperimentDeta switch strings.ToLower(experimentsDetails.Sequence) { case "serial": if err := injectChaosInSerialMode(experimentsDetails, instanceIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": if err := injectChaosInParallelMode(experimentsDetails, instanceIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.Sequence) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)} } //Waiting for the ramp time after chaos injection @@ -67,7 +69,7 @@ func PrepareEC2TerminateByTag(experimentsDetails *experimentTypes.ExperimentDeta return nil } -//injectChaosInSerialMode will inject the ce2 instance termination in serial mode that is one after other +// injectChaosInSerialMode will inject the ce2 instance termination in serial mode that is one after other func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, instanceIDList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { select { @@ -95,7 +97,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai //Stopping the EC2 instance log.Info("[Chaos]: Stopping the desired EC2 instance") if err := awslib.EC2Stop(id, experimentsDetails.Region); err != nil { - return errors.Errorf("ec2 instance failed to stop, err: %v", err) + return stacktrace.Propagate(err, "ec2 instance failed to stop") } common.SetTargets(id, "injected", "EC2", chaosDetails) @@ -103,14 +105,14 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai //Wait for ec2 instance to completely stop log.Infof("[Wait]: Wait for EC2 instance '%v' to get in stopped state", id) if err := awslib.WaitForEC2Down(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.ManagedNodegroup, experimentsDetails.Region, id); err != nil { - return errors.Errorf("unable to stop the ec2 instance, err: %v", err) + return stacktrace.Propagate(err, "ec2 instance failed to stop") } // run the probes during chaos // the OnChaos probes execution will start in the first iteration and keep running for the entire chaos duration if len(resultDetails.ProbeDetails) != 0 && i == 0 { if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { - return err + return stacktrace.Propagate(err, "failed to run probes") } } @@ -122,13 +124,13 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai if experimentsDetails.ManagedNodegroup != "enable" { log.Info("[Chaos]: Starting back the EC2 instance") if err := awslib.EC2Start(id, experimentsDetails.Region); err != nil { - return errors.Errorf("ec2 instance failed to start, err: %v", err) + return stacktrace.Propagate(err, "ec2 instance failed to start") } //Wait for ec2 instance to get in running state log.Infof("[Wait]: Wait for EC2 instance '%v' to get in running state", id) if err := awslib.WaitForEC2Up(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.ManagedNodegroup, 
experimentsDetails.Region, id); err != nil { - return errors.Errorf("unable to start the ec2 instance, err: %v", err) + return stacktrace.Propagate(err, "ec2 instance failed to start") } } common.SetTargets(id, "reverted", "EC2", chaosDetails) @@ -165,7 +167,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet //Stopping the EC2 instance log.Info("[Chaos]: Stopping the desired EC2 instance") if err := awslib.EC2Stop(id, experimentsDetails.Region); err != nil { - return errors.Errorf("ec2 instance failed to stop, err: %v", err) + return stacktrace.Propagate(err, "ec2 instance failed to stop") } common.SetTargets(id, "injected", "EC2", chaosDetails) } @@ -174,14 +176,14 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet //Wait for ec2 instance to completely stop log.Infof("[Wait]: Wait for EC2 instance '%v' to get in stopped state", id) if err := awslib.WaitForEC2Down(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.ManagedNodegroup, experimentsDetails.Region, id); err != nil { - return errors.Errorf("unable to stop the ec2 instance, err: %v", err) + return stacktrace.Propagate(err, "ec2 instance failed to stop") } } // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { - return err + return stacktrace.Propagate(err, "failed to run probes") } } @@ -195,7 +197,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet for _, id := range instanceIDList { log.Info("[Chaos]: Starting back the EC2 instance") if err := awslib.EC2Start(id, experimentsDetails.Region); err != nil { - return errors.Errorf("ec2 instance failed to start, err: %v", err) + return stacktrace.Propagate(err, "ec2 instance failed to start") } } @@ -203,7 +205,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet //Wait for ec2 instance to get in running state log.Infof("[Wait]: Wait for EC2 instance '%v' to get in running state", id) if err := awslib.WaitForEC2Up(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.ManagedNodegroup, experimentsDetails.Region, id); err != nil { - return errors.Errorf("unable to start the ec2 instance, err: %v", err) + return stacktrace.Propagate(err, "ec2 instance failed to start") } } } @@ -216,21 +218,24 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet return nil } -//SetTargetInstance will select the target instance which are in running state and filtered from the given instance tag +// SetTargetInstance will select the target instance which are in running state and filtered from the given instance tag func SetTargetInstance(experimentsDetails *experimentTypes.ExperimentDetails) error { instanceIDList, err := awslib.GetInstanceList(experimentsDetails.InstanceTag, experimentsDetails.Region) if err != nil { - return err + return stacktrace.Propagate(err, "failed to get the instance id list") } if len(instanceIDList) == 0 { - return errors.Errorf("no instance found with the given tag %v, in region %v", experimentsDetails.InstanceTag, experimentsDetails.Region) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeTargetSelection, + Reason: fmt.Sprintf("no instance found with the given tag %v, in region %v", experimentsDetails.InstanceTag, experimentsDetails.Region), + } } for _, id := range instanceIDList { instanceState, err := awslib.GetEC2InstanceStatus(id, 
experimentsDetails.Region) if err != nil { - return errors.Errorf("fail to get the instance status while selecting the target instances, err: %v", err) + return stacktrace.Propagate(err, "failed to get the instance status while selecting the target instances") } if instanceState == "running" { experimentsDetails.TargetInstanceIDList = append(experimentsDetails.TargetInstanceIDList, id) @@ -238,7 +243,10 @@ func SetTargetInstance(experimentsDetails *experimentTypes.ExperimentDetails) er } if len(experimentsDetails.TargetInstanceIDList) == 0 { - return errors.Errorf("fail to get any running instance having instance tag: %v", experimentsDetails.InstanceTag) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosInject, + Reason: "failed to get any running instance", + Target: fmt.Sprintf("EC2 Instance Tag: %v", experimentsDetails.InstanceTag)} } log.InfoWithValues("[Info]: Targeting the running instances filtered from instance tag", logrus.Fields{ @@ -257,19 +265,19 @@ func abortWatcher(experimentsDetails *experimentTypes.ExperimentDetails, instanc for _, id := range instanceIDList { instanceState, err := awslib.GetEC2InstanceStatus(id, experimentsDetails.Region) if err != nil { - log.Errorf("fail to get instance status when an abort signal is received,err :%v", err) + log.Errorf("Failed to get instance status when an abort signal is received: %v", err) } if instanceState != "running" && experimentsDetails.ManagedNodegroup != "enable" { log.Info("[Abort]: Waiting for the EC2 instance to get down") if err := awslib.WaitForEC2Down(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.ManagedNodegroup, experimentsDetails.Region, id); err != nil { - log.Errorf("unable to wait till stop of the instance, err: %v", err) + log.Errorf("Unable to wait till stop of the instance: %v", err) } log.Info("[Abort]: Starting EC2 instance as abort signal received") err := awslib.EC2Start(id, experimentsDetails.Region) if err != nil { - log.Errorf("ec2 instance failed to start when an abort signal is received, err: %v", err) + log.Errorf("EC2 instance failed to start when an abort signal is received: %v", err) } } common.SetTargets(id, "reverted", "EC2", chaosDetails) diff --git a/chaoslib/litmus/gcp-vm-disk-loss-by-label/lib/gcp-vm-disk-loss-by-label.go b/chaoslib/litmus/gcp-vm-disk-loss-by-label/lib/gcp-vm-disk-loss-by-label.go index fc9c7b8fa..fb6dff6b7 100644 --- a/chaoslib/litmus/gcp-vm-disk-loss-by-label/lib/gcp-vm-disk-loss-by-label.go +++ b/chaoslib/litmus/gcp-vm-disk-loss-by-label/lib/gcp-vm-disk-loss-by-label.go @@ -1,12 +1,14 @@ package lib import ( + "fmt" "os" "os/signal" "strings" "syscall" "time" + "github.com/litmuschaos/litmus-go/pkg/cerrors" clients "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/cloud/gcp" "github.com/litmuschaos/litmus-go/pkg/events" @@ -15,7 +17,7 @@ import ( "github.com/litmuschaos/litmus-go/pkg/probe" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" - "github.com/pkg/errors" + "github.com/palantir/stacktrace" "google.golang.org/api/compute/v1" ) @@ -62,14 +64,14 @@ func PrepareDiskVolumeLossByLabel(computeService *compute.Service, experimentsDe switch strings.ToLower(experimentsDetails.Sequence) { case "serial": if err = injectChaosInSerialMode(computeService, experimentsDetails, diskVolumeNamesList, experimentsDetails.TargetDiskInstanceNamesList, experimentsDetails.Zones, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return 
stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": if err = injectChaosInParallelMode(computeService, experimentsDetails, diskVolumeNamesList, experimentsDetails.TargetDiskInstanceNamesList, experimentsDetails.Zones, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.Sequence) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)} } } @@ -102,7 +104,7 @@ func injectChaosInSerialMode(computeService *compute.Service, experimentsDetails //Detaching the disk volume from the instance log.Info("[Chaos]: Detaching the disk volume from the instance") if err = gcp.DiskVolumeDetach(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, zone, experimentsDetails.DeviceNamesList[i]); err != nil { - return errors.Errorf("disk detachment failed, err: %v", err) + return stacktrace.Propagate(err, "disk detachment failed") } common.SetTargets(targetDiskVolumeNamesList[i], "injected", "DiskVolume", chaosDetails) @@ -110,7 +112,7 @@ func injectChaosInSerialMode(computeService *compute.Service, experimentsDetails //Wait for disk volume detachment log.Infof("[Wait]: Wait for disk volume detachment for volume %v", targetDiskVolumeNamesList[i]) if err = gcp.WaitForVolumeDetachment(computeService, targetDiskVolumeNamesList[i], experimentsDetails.GCPProjectID, instanceNamesList[i], zone, experimentsDetails.Delay, experimentsDetails.Timeout); err != nil { - return errors.Errorf("unable to detach the disk volume from the vm instance, err: %v", err) + return stacktrace.Propagate(err, "unable to detach the disk volume from the vm instance") } // run the probes during chaos @@ -128,7 +130,7 @@ func injectChaosInSerialMode(computeService *compute.Service, experimentsDetails //Getting the disk volume attachment status diskState, err := gcp.GetDiskVolumeState(computeService, targetDiskVolumeNamesList[i], experimentsDetails.GCPProjectID, instanceNamesList[i], zone) if err != nil { - return errors.Errorf("failed to get the disk volume status, err: %v", err) + return stacktrace.Propagate(err, "failed to get the disk volume status") } switch diskState { @@ -138,13 +140,13 @@ func injectChaosInSerialMode(computeService *compute.Service, experimentsDetails //Attaching the disk volume to the instance log.Info("[Chaos]: Attaching the disk volume back to the instance") if err = gcp.DiskVolumeAttach(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, zone, experimentsDetails.DeviceNamesList[i], targetDiskVolumeNamesList[i]); err != nil { - return errors.Errorf("disk attachment failed, err: %v", err) + return stacktrace.Propagate(err, "disk attachment failed") } //Wait for disk volume attachment log.Infof("[Wait]: Wait for disk volume attachment for %v volume", targetDiskVolumeNamesList[i]) if err = gcp.WaitForVolumeAttachment(computeService, targetDiskVolumeNamesList[i], experimentsDetails.GCPProjectID, instanceNamesList[i], zone, experimentsDetails.Delay, experimentsDetails.Timeout); err != nil { - return errors.Errorf("unable to attach the disk volume to the vm instance, err: %v", err) + return stacktrace.Propagate(err, "unable to attach the disk volume to the vm instance") } } @@ -177,7 +179,7 @@ func injectChaosInParallelMode(computeService *compute.Service, experimentsDetai //Detaching the 
disk volume from the instance log.Info("[Chaos]: Detaching the disk volume from the instance") if err = gcp.DiskVolumeDetach(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, zone, experimentsDetails.DeviceNamesList[i]); err != nil { - return errors.Errorf("disk detachment failed, err: %v", err) + return stacktrace.Propagate(err, "disk detachment failed") } common.SetTargets(targetDiskVolumeNamesList[i], "injected", "DiskVolume", chaosDetails) @@ -188,7 +190,7 @@ func injectChaosInParallelMode(computeService *compute.Service, experimentsDetai //Wait for disk volume detachment log.Infof("[Wait]: Wait for disk volume detachment for volume %v", targetDiskVolumeNamesList[i]) if err = gcp.WaitForVolumeDetachment(computeService, targetDiskVolumeNamesList[i], experimentsDetails.GCPProjectID, instanceNamesList[i], zone, experimentsDetails.Delay, experimentsDetails.Timeout); err != nil { - return errors.Errorf("unable to detach the disk volume from the vm instance, err: %v", err) + return stacktrace.Propagate(err, "unable to detach the disk volume from the vm instance") } } @@ -208,7 +210,7 @@ func injectChaosInParallelMode(computeService *compute.Service, experimentsDetai //Getting the disk volume attachment status diskState, err := gcp.GetDiskVolumeState(computeService, targetDiskVolumeNamesList[i], experimentsDetails.GCPProjectID, instanceNamesList[i], zone) if err != nil { - return errors.Errorf("failed to get the disk status, err: %v", err) + return stacktrace.Propagate(err, "failed to get the disk status") } switch diskState { @@ -218,13 +220,13 @@ func injectChaosInParallelMode(computeService *compute.Service, experimentsDetai //Attaching the disk volume to the instance log.Info("[Chaos]: Attaching the disk volume to the instance") if err = gcp.DiskVolumeAttach(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, zone, experimentsDetails.DeviceNamesList[i], targetDiskVolumeNamesList[i]); err != nil { - return errors.Errorf("disk attachment failed, err: %v", err) + return stacktrace.Propagate(err, "disk attachment failed") } //Wait for disk volume attachment log.Infof("[Wait]: Wait for disk volume attachment for volume %v", targetDiskVolumeNamesList[i]) if err = gcp.WaitForVolumeAttachment(computeService, targetDiskVolumeNamesList[i], experimentsDetails.GCPProjectID, instanceNamesList[i], zone, experimentsDetails.Delay, experimentsDetails.Timeout); err != nil { - return errors.Errorf("unable to attach the disk volume to the vm instance, err: %v", err) + return stacktrace.Propagate(err, "unable to attach the disk volume to the vm instance") } } @@ -249,25 +251,25 @@ func abortWatcher(computeService *compute.Service, experimentsDetails *experimen //Getting the disk volume attachment status diskState, err := gcp.GetDiskVolumeState(computeService, targetDiskVolumeNamesList[i], experimentsDetails.GCPProjectID, instanceNamesList[i], zone) if err != nil { - log.Errorf("failed to get the disk state when an abort signal is received, err: %v", err) + log.Errorf("Failed to get %s disk state when an abort signal is received, err: %v", targetDiskVolumeNamesList[i], err) } if diskState != "attached" { //Wait for disk volume detachment //We first wait for the volume to get in detached state then we are attaching it. 
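Note for reviewers: the GCP hunks use the same wrap-with-context idiom. Where two failures later need to be reported together, the helpers recover the underlying messages with stacktrace.RootCause (as the http-chaos helper in this patch does) rather than nesting wrapped strings. A small illustration of that round trip, using an invented error value:

    package lib

    import (
        "errors"
        "fmt"

        "github.com/palantir/stacktrace"
    )

    // propagateExample (illustrative, not part of the patch): Propagate layers context on
    // top of the cause, and RootCause strips it back off when only the original message is
    // wanted, e.g. when two failures are joined into a single failstep string.
    func propagateExample() {
        cause := errors.New("disk detachment failed: quota exceeded") // invented error
        wrapped := stacktrace.Propagate(cause, "could not run chaos in serial mode")

        fmt.Println(wrapped)                       // context, call site, then the cause
        fmt.Println(stacktrace.RootCause(wrapped)) // just the original cause
    }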
- log.Info("[Abort]: Wait for complete disk volume detachment") + log.Infof("[Abort]: Wait for %s complete disk volume detachment", targetDiskVolumeNamesList[i]) if err = gcp.WaitForVolumeDetachment(computeService, targetDiskVolumeNamesList[i], experimentsDetails.GCPProjectID, instanceNamesList[i], zone, experimentsDetails.Delay, experimentsDetails.Timeout); err != nil { - log.Errorf("unable to detach the disk volume, err: %v", err) + log.Errorf("Unable to detach %s disk volume, err: %v", targetDiskVolumeNamesList[i], err) } //Attaching the disk volume from the instance - log.Info("[Chaos]: Attaching the disk volume from the instance") + log.Infof("[Chaos]: Attaching %s disk volume to the instance", targetDiskVolumeNamesList[i]) err = gcp.DiskVolumeAttach(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, zone, experimentsDetails.DeviceNamesList[i], targetDiskVolumeNamesList[i]) if err != nil { - log.Errorf("disk attachment failed when an abort signal is received, err: %v", err) + log.Errorf("%s disk attachment failed when an abort signal is received, err: %v", targetDiskVolumeNamesList[i], err) } } @@ -285,12 +287,12 @@ func getDeviceNamesAndVMInstanceNames(diskVolumeNamesList []string, computeServi instanceName, err := gcp.GetVolumeAttachmentDetails(computeService, experimentsDetails.GCPProjectID, experimentsDetails.Zones, diskVolumeNamesList[i]) if err != nil || instanceName == "" { - return errors.Errorf("failed to get the attachment info, err: %v", err) + return stacktrace.Propagate(err, "failed to get the disk attachment info") } deviceName, err := gcp.GetDiskDeviceNameForVM(computeService, diskVolumeNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.Zones, instanceName) if err != nil { - return err + return stacktrace.Propagate(err, "failed to fetch the disk device name") } experimentsDetails.TargetDiskInstanceNamesList = append(experimentsDetails.TargetDiskInstanceNamesList, instanceName) diff --git a/chaoslib/litmus/gcp-vm-disk-loss/lib/gcp-vm-disk-loss.go b/chaoslib/litmus/gcp-vm-disk-loss/lib/gcp-vm-disk-loss.go index a31d31145..38f06901e 100644 --- a/chaoslib/litmus/gcp-vm-disk-loss/lib/gcp-vm-disk-loss.go +++ b/chaoslib/litmus/gcp-vm-disk-loss/lib/gcp-vm-disk-loss.go @@ -1,12 +1,14 @@ package lib import ( + "fmt" "os" "os/signal" "strings" "syscall" "time" + "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/clients" gcp "github.com/litmuschaos/litmus-go/pkg/cloud/gcp" "github.com/litmuschaos/litmus-go/pkg/events" @@ -15,6 +17,7 @@ import ( "github.com/litmuschaos/litmus-go/pkg/probe" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" + "github.com/palantir/stacktrace" "github.com/pkg/errors" "google.golang.org/api/compute/v1" ) @@ -51,7 +54,7 @@ func PrepareDiskVolumeLoss(computeService *compute.Service, experimentsDetails * //get the device names for the given disks if err := getDeviceNamesList(computeService, experimentsDetails, diskNamesList, diskZonesList); err != nil { - return err + return stacktrace.Propagate(err, "failed to fetch the disk device names") } select { @@ -66,14 +69,14 @@ func PrepareDiskVolumeLoss(computeService *compute.Service, experimentsDetails * switch strings.ToLower(experimentsDetails.Sequence) { case "serial": if err = injectChaosInSerialMode(computeService, experimentsDetails, diskNamesList, diskZonesList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run 
chaos in serial mode") } case "parallel": if err = injectChaosInParallelMode(computeService, experimentsDetails, diskNamesList, diskZonesList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.Sequence) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)} } } @@ -103,17 +106,17 @@ func injectChaosInSerialMode(computeService *compute.Service, experimentsDetails for i := range targetDiskVolumeNamesList { //Detaching the disk volume from the instance - log.Info("[Chaos]: Detaching the disk volume from the instance") + log.Infof("[Chaos]: Detaching %s disk volume from the instance", targetDiskVolumeNamesList[i]) if err = gcp.DiskVolumeDetach(computeService, experimentsDetails.TargetDiskInstanceNamesList[i], experimentsDetails.GCPProjectID, diskZonesList[i], experimentsDetails.DeviceNamesList[i]); err != nil { - return errors.Errorf("disk detachment failed, err: %v", err) + return stacktrace.Propagate(err, "disk detachment failed") } common.SetTargets(targetDiskVolumeNamesList[i], "injected", "DiskVolume", chaosDetails) //Wait for disk volume detachment - log.Infof("[Wait]: Wait for disk volume detachment for volume %v", targetDiskVolumeNamesList[i]) + log.Infof("[Wait]: Wait for %s disk volume detachment", targetDiskVolumeNamesList[i]) if err = gcp.WaitForVolumeDetachment(computeService, targetDiskVolumeNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.TargetDiskInstanceNamesList[i], diskZonesList[i], experimentsDetails.Delay, experimentsDetails.Timeout); err != nil { - return errors.Errorf("unable to detach the disk volume from the vm instance, err: %v", err) + return stacktrace.Propagate(err, "unable to detach disk volume from the vm instance") } // run the probes during chaos @@ -131,23 +134,23 @@ func injectChaosInSerialMode(computeService *compute.Service, experimentsDetails //Getting the disk volume attachment status diskState, err := gcp.GetDiskVolumeState(computeService, targetDiskVolumeNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.TargetDiskInstanceNamesList[i], diskZonesList[i]) if err != nil { - return errors.Errorf("failed to get the disk volume status, err: %v", err) + return stacktrace.Propagate(err, fmt.Sprintf("failed to get %s disk volume status", targetDiskVolumeNamesList[i])) } switch diskState { case "attached": - log.Info("[Skip]: The disk volume is already attached") + log.Infof("[Skip]: %s disk volume is already attached", targetDiskVolumeNamesList[i]) default: //Attaching the disk volume to the instance - log.Info("[Chaos]: Attaching the disk volume back to the instance") + log.Infof("[Chaos]: Attaching %s disk volume back to the instance", targetDiskVolumeNamesList[i]) if err = gcp.DiskVolumeAttach(computeService, experimentsDetails.TargetDiskInstanceNamesList[i], experimentsDetails.GCPProjectID, diskZonesList[i], experimentsDetails.DeviceNamesList[i], targetDiskVolumeNamesList[i]); err != nil { - return errors.Errorf("disk attachment failed, err: %v", err) + return stacktrace.Propagate(err, "disk attachment failed") } //Wait for disk volume attachment - log.Infof("[Wait]: Wait for disk volume attachment for %v volume", targetDiskVolumeNamesList[i]) + log.Infof("[Wait]: Wait for %s disk volume attachment", targetDiskVolumeNamesList[i]) if err = 
gcp.WaitForVolumeAttachment(computeService, targetDiskVolumeNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.TargetDiskInstanceNamesList[i], diskZonesList[i], experimentsDetails.Delay, experimentsDetails.Timeout); err != nil { - return errors.Errorf("unable to attach the disk volume to the vm instance, err: %v", err) + return stacktrace.Propagate(err, "unable to attach disk volume to the vm instance") } } common.SetTargets(targetDiskVolumeNamesList[i], "reverted", "DiskVolume", chaosDetails) @@ -175,9 +178,9 @@ func injectChaosInParallelMode(computeService *compute.Service, experimentsDetai for i := range targetDiskVolumeNamesList { //Detaching the disk volume from the instance - log.Info("[Chaos]: Detaching the disk volume from the instance") + log.Infof("[Chaos]: Detaching %s disk volume from the instance", targetDiskVolumeNamesList[i]) if err = gcp.DiskVolumeDetach(computeService, experimentsDetails.TargetDiskInstanceNamesList[i], experimentsDetails.GCPProjectID, diskZonesList[i], experimentsDetails.DeviceNamesList[i]); err != nil { - return errors.Errorf("disk detachment failed, err: %v", err) + return stacktrace.Propagate(err, "disk detachment failed") } common.SetTargets(targetDiskVolumeNamesList[i], "injected", "DiskVolume", chaosDetails) @@ -186,9 +189,9 @@ func injectChaosInParallelMode(computeService *compute.Service, experimentsDetai for i := range targetDiskVolumeNamesList { //Wait for disk volume detachment - log.Infof("[Wait]: Wait for disk volume detachment for volume %v", targetDiskVolumeNamesList[i]) + log.Infof("[Wait]: Wait for %s disk volume detachment", targetDiskVolumeNamesList[i]) if err = gcp.WaitForVolumeDetachment(computeService, targetDiskVolumeNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.TargetDiskInstanceNamesList[i], diskZonesList[i], experimentsDetails.Delay, experimentsDetails.Timeout); err != nil { - return errors.Errorf("unable to detach the disk volume from the vm instance, err: %v", err) + return stacktrace.Propagate(err, "unable to detach disk volume from the vm instance") } } @@ -213,18 +216,18 @@ func injectChaosInParallelMode(computeService *compute.Service, experimentsDetai switch diskState { case "attached": - log.Info("[Skip]: The disk volume is already attached") + log.Infof("[Skip]: %s disk volume is already attached", targetDiskVolumeNamesList[i]) default: //Attaching the disk volume to the instance - log.Info("[Chaos]: Attaching the disk volume to the instance") + log.Infof("[Chaos]: Attaching %s disk volume to the instance", targetDiskVolumeNamesList[i]) if err = gcp.DiskVolumeAttach(computeService, experimentsDetails.TargetDiskInstanceNamesList[i], experimentsDetails.GCPProjectID, diskZonesList[i], experimentsDetails.DeviceNamesList[i], targetDiskVolumeNamesList[i]); err != nil { - return errors.Errorf("disk attachment failed, err: %v", err) + return stacktrace.Propagate(err, "disk attachment failed") } //Wait for disk volume attachment - log.Infof("[Wait]: Wait for disk volume attachment for volume %v", targetDiskVolumeNamesList[i]) + log.Infof("[Wait]: Wait for %s disk volume attachment", targetDiskVolumeNamesList[i]) if err = gcp.WaitForVolumeAttachment(computeService, targetDiskVolumeNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.TargetDiskInstanceNamesList[i], diskZonesList[i], experimentsDetails.Delay, experimentsDetails.Timeout); err != nil { - return errors.Errorf("unable to attach the disk volume to the vm instance, err: %v", err) + return stacktrace.Propagate(err, "unable to 
attach disk volume to the vm instance") } } common.SetTargets(targetDiskVolumeNamesList[i], "reverted", "DiskVolume", chaosDetails) @@ -246,25 +249,25 @@ func abortWatcher(computeService *compute.Service, experimentsDetails *experimen //Getting the disk volume attachment status diskState, err := gcp.GetDiskVolumeState(computeService, targetDiskVolumeNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.TargetDiskInstanceNamesList[i], diskZonesList[i]) if err != nil { - log.Errorf("failed to get the disk state when an abort signal is received, err: %v", err) + log.Errorf("Failed to get %s disk state when an abort signal is received, err: %v", targetDiskVolumeNamesList[i], err) } if diskState != "attached" { //Wait for disk volume detachment //We first wait for the volume to get in detached state then we are attaching it. - log.Info("[Abort]: Wait for complete disk volume detachment") + log.Infof("[Abort]: Wait for complete disk volume detachment for %s", targetDiskVolumeNamesList[i]) if err = gcp.WaitForVolumeDetachment(computeService, targetDiskVolumeNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.TargetDiskInstanceNamesList[i], diskZonesList[i], experimentsDetails.Delay, experimentsDetails.Timeout); err != nil { - log.Errorf("unable to detach the disk volume, err: %v", err) + log.Errorf("Unable to detach %s disk volume, err: %v", targetDiskVolumeNamesList[i], err) } //Attaching the disk volume from the instance - log.Info("[Chaos]: Attaching the disk volume from the instance") + log.Infof("[Chaos]: Attaching %s disk volume from the instance", targetDiskVolumeNamesList[i]) err = gcp.DiskVolumeAttach(computeService, experimentsDetails.TargetDiskInstanceNamesList[i], experimentsDetails.GCPProjectID, diskZonesList[i], experimentsDetails.DeviceNamesList[i], targetDiskVolumeNamesList[i]) if err != nil { - log.Errorf("disk attachment failed when an abort signal is received, err: %v", err) + log.Errorf("%s disk attachment failed when an abort signal is received, err: %v", targetDiskVolumeNamesList[i], err) } } diff --git a/chaoslib/litmus/gcp-vm-instance-stop-by-label/lib/gcp-vm-instance-stop-by-label.go b/chaoslib/litmus/gcp-vm-instance-stop-by-label/lib/gcp-vm-instance-stop-by-label.go index d9d31dc97..ca5eb4c0c 100644 --- a/chaoslib/litmus/gcp-vm-instance-stop-by-label/lib/gcp-vm-instance-stop-by-label.go +++ b/chaoslib/litmus/gcp-vm-instance-stop-by-label/lib/gcp-vm-instance-stop-by-label.go @@ -1,12 +1,14 @@ package lib import ( + "fmt" "os" "os/signal" "strings" "syscall" "time" + "github.com/litmuschaos/litmus-go/pkg/cerrors" clients "github.com/litmuschaos/litmus-go/pkg/clients" gcplib "github.com/litmuschaos/litmus-go/pkg/cloud/gcp" "github.com/litmuschaos/litmus-go/pkg/events" @@ -15,7 +17,7 @@ import ( "github.com/litmuschaos/litmus-go/pkg/probe" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" - "github.com/pkg/errors" + "github.com/palantir/stacktrace" "google.golang.org/api/compute/v1" ) @@ -49,14 +51,14 @@ func PrepareVMStopByLabel(computeService *compute.Service, experimentsDetails *e switch strings.ToLower(experimentsDetails.Sequence) { case "serial": if err := injectChaosInSerialMode(computeService, experimentsDetails, instanceNamesList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": if err := injectChaosInParallelMode(computeService, experimentsDetails, instanceNamesList, 
clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.Sequence) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)} } //Waiting for the ramp time after chaos injection @@ -96,7 +98,7 @@ func injectChaosInSerialMode(computeService *compute.Service, experimentsDetails //Stopping the VM instance log.Infof("[Chaos]: Stopping %s VM instance", instanceNamesList[i]) if err := gcplib.VMInstanceStop(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.Zones); err != nil { - return errors.Errorf("VM instance failed to stop, err: %v", err) + return stacktrace.Propagate(err, "VM instance failed to stop") } common.SetTargets(instanceNamesList[i], "injected", "VM", chaosDetails) @@ -104,7 +106,7 @@ func injectChaosInSerialMode(computeService *compute.Service, experimentsDetails //Wait for VM instance to completely stop log.Infof("[Wait]: Wait for VM instance %s to stop", instanceNamesList[i]) if err := gcplib.WaitForVMInstanceDown(computeService, experimentsDetails.Timeout, experimentsDetails.Delay, instanceNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.Zones); err != nil { - return errors.Errorf("%s vm instance failed to fully shutdown, err: %v", instanceNamesList[i], err) + return stacktrace.Propagate(err, "vm instance failed to fully shutdown") } // run the probes during chaos @@ -125,7 +127,7 @@ func injectChaosInSerialMode(computeService *compute.Service, experimentsDetails // wait for VM instance to get in running state log.Infof("[Wait]: Wait for VM instance %s to get in RUNNING state", instanceNamesList[i]) if err := gcplib.WaitForVMInstanceUp(computeService, experimentsDetails.Timeout, experimentsDetails.Delay, instanceNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.Zones); err != nil { - return errors.Errorf("unable to start %s vm instance, err: %v", instanceNamesList[i], err) + return stacktrace.Propagate(err, "unable to start %s vm instance") } default: @@ -133,13 +135,13 @@ func injectChaosInSerialMode(computeService *compute.Service, experimentsDetails // starting the VM instance log.Infof("[Chaos]: Starting back %s VM instance", instanceNamesList[i]) if err := gcplib.VMInstanceStart(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.Zones); err != nil { - return errors.Errorf("%s vm instance failed to start, err: %v", instanceNamesList[i], err) + return stacktrace.Propagate(err, "vm instance failed to start") } // wait for VM instance to get in running state log.Infof("[Wait]: Wait for VM instance %s to get in RUNNING state", instanceNamesList[i]) if err := gcplib.WaitForVMInstanceUp(computeService, experimentsDetails.Timeout, experimentsDetails.Delay, instanceNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.Zones); err != nil { - return errors.Errorf("unable to start %s vm instance, err: %v", instanceNamesList[i], err) + return stacktrace.Propagate(err, "unable to start %s vm instance") } } @@ -181,7 +183,7 @@ func injectChaosInParallelMode(computeService *compute.Service, experimentsDetai // stopping the VM instance log.Infof("[Chaos]: Stopping %s VM instance", instanceNamesList[i]) if err := gcplib.VMInstanceStop(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, 
experimentsDetails.Zones); err != nil { - return errors.Errorf("%s vm instance failed to stop, err: %v", instanceNamesList[i], err) + return stacktrace.Propagate(err, "vm instance failed to stop") } common.SetTargets(instanceNamesList[i], "injected", "VM", chaosDetails) @@ -192,7 +194,7 @@ func injectChaosInParallelMode(computeService *compute.Service, experimentsDetai // wait for VM instance to completely stop log.Infof("[Wait]: Wait for VM instance %s to get in stopped state", instanceNamesList[i]) if err := gcplib.WaitForVMInstanceDown(computeService, experimentsDetails.Timeout, experimentsDetails.Delay, instanceNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.Zones); err != nil { - return errors.Errorf("%s vm instance failed to fully shutdown, err: %v", instanceNamesList[i], err) + return stacktrace.Propagate(err, "vm instance failed to fully shutdown") } } @@ -215,7 +217,7 @@ func injectChaosInParallelMode(computeService *compute.Service, experimentsDetai log.Infof("[Wait]: Wait for VM instance '%v' to get in running state", instanceNamesList[i]) if err := gcplib.WaitForVMInstanceUp(computeService, experimentsDetails.Timeout, experimentsDetails.Delay, instanceNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.Zones); err != nil { - return errors.Errorf("unable to start the vm instance, err: %v", err) + return stacktrace.Propagate(err, "unable to start the vm instance") } common.SetTargets(instanceNamesList[i], "reverted", "VM", chaosDetails) @@ -228,7 +230,7 @@ func injectChaosInParallelMode(computeService *compute.Service, experimentsDetai log.Info("[Chaos]: Starting back the VM instance") if err := gcplib.VMInstanceStart(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.Zones); err != nil { - return errors.Errorf("vm instance failed to start, err: %v", err) + return stacktrace.Propagate(err, "vm instance failed to start") } } @@ -237,7 +239,7 @@ func injectChaosInParallelMode(computeService *compute.Service, experimentsDetai log.Infof("[Wait]: Wait for VM instance '%v' to get in running state", instanceNamesList[i]) if err := gcplib.WaitForVMInstanceUp(computeService, experimentsDetails.Timeout, experimentsDetails.Delay, instanceNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.Zones); err != nil { - return errors.Errorf("unable to start the vm instance, err: %v", err) + return stacktrace.Propagate(err, "unable to start the vm instance") } common.SetTargets(instanceNamesList[i], "reverted", "VM", chaosDetails) @@ -260,19 +262,19 @@ func abortWatcher(computeService *compute.Service, experimentsDetails *experimen for i := range instanceNamesList { instanceState, err := gcplib.GetVMInstanceStatus(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.Zones) if err != nil { - log.Errorf("fail to get instance status when an abort signal is received,err :%v", err) + log.Errorf("Failed to get %s instance status when an abort signal is received, err: %v", instanceNamesList[i], err) } if instanceState != "RUNNING" && experimentsDetails.ManagedInstanceGroup != "enable" { log.Info("[Abort]: Waiting for the VM instance to shut down") if err := gcplib.WaitForVMInstanceDown(computeService, experimentsDetails.Timeout, experimentsDetails.Delay, instanceNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.Zones); err != nil { - log.Errorf("unable to wait till stop of the instance, err: %v", err) + log.Errorf("Unable to wait till stop of %s instance, err: %v", 
instanceNamesList[i], err) } log.Info("[Abort]: Starting VM instance as abort signal received") err := gcplib.VMInstanceStart(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.Zones) if err != nil { - log.Errorf("vm instance failed to start when an abort signal is received, err: %v", err) + log.Errorf("%s instance failed to start when an abort signal is received, err: %v", instanceNamesList[i], err) } } common.SetTargets(instanceNamesList[i], "reverted", "VM", chaosDetails) diff --git a/chaoslib/litmus/gcp-vm-instance-stop/lib/gcp-vm-instance-stop.go b/chaoslib/litmus/gcp-vm-instance-stop/lib/gcp-vm-instance-stop.go index 454dbabb2..2cbcfdba4 100644 --- a/chaoslib/litmus/gcp-vm-instance-stop/lib/gcp-vm-instance-stop.go +++ b/chaoslib/litmus/gcp-vm-instance-stop/lib/gcp-vm-instance-stop.go @@ -1,12 +1,14 @@ package lib import ( + "fmt" "os" "os/signal" "strings" "syscall" "time" + "github.com/litmuschaos/litmus-go/pkg/cerrors" clients "github.com/litmuschaos/litmus-go/pkg/clients" gcplib "github.com/litmuschaos/litmus-go/pkg/cloud/gcp" "github.com/litmuschaos/litmus-go/pkg/events" @@ -15,7 +17,7 @@ import ( "github.com/litmuschaos/litmus-go/pkg/probe" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" - "github.com/pkg/errors" + "github.com/palantir/stacktrace" "google.golang.org/api/compute/v1" ) @@ -54,14 +56,14 @@ func PrepareVMStop(computeService *compute.Service, experimentsDetails *experime switch strings.ToLower(experimentsDetails.Sequence) { case "serial": if err = injectChaosInSerialMode(computeService, experimentsDetails, instanceNamesList, instanceZonesList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": if err = injectChaosInParallelMode(computeService, experimentsDetails, instanceNamesList, instanceZonesList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.Sequence) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)} } // wait for the ramp time after chaos injection @@ -101,7 +103,7 @@ func injectChaosInSerialMode(computeService *compute.Service, experimentsDetails //Stopping the VM instance log.Infof("[Chaos]: Stopping %s VM instance", instanceNamesList[i]) if err := gcplib.VMInstanceStop(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, instanceZonesList[i]); err != nil { - return errors.Errorf("%s VM instance failed to stop, err: %v", instanceNamesList[i], err) + return stacktrace.Propagate(err, "vm instance failed to stop") } common.SetTargets(instanceNamesList[i], "injected", "VM", chaosDetails) @@ -109,7 +111,7 @@ func injectChaosInSerialMode(computeService *compute.Service, experimentsDetails //Wait for VM instance to completely stop log.Infof("[Wait]: Wait for VM instance %s to get in stopped state", instanceNamesList[i]) if err := gcplib.WaitForVMInstanceDown(computeService, experimentsDetails.Timeout, experimentsDetails.Delay, instanceNamesList[i], experimentsDetails.GCPProjectID, instanceZonesList[i]); err != nil { - return errors.Errorf("%s vm instance failed to fully shutdown, err: %v", instanceNamesList[i], err) + return stacktrace.Propagate(err, "vm 
instance failed to fully shutdown") } // run the probes during chaos @@ -130,13 +132,13 @@ func injectChaosInSerialMode(computeService *compute.Service, experimentsDetails // starting the VM instance log.Infof("[Chaos]: Starting back %s VM instance", instanceNamesList[i]) if err := gcplib.VMInstanceStart(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, instanceZonesList[i]); err != nil { - return errors.Errorf("%s vm instance failed to start, err: %v", instanceNamesList[i], err) + return stacktrace.Propagate(err, "vm instance failed to start") } // wait for VM instance to get in running state log.Infof("[Wait]: Wait for VM instance %s to get in running state", instanceNamesList[i]) if err := gcplib.WaitForVMInstanceUp(computeService, experimentsDetails.Timeout, experimentsDetails.Delay, instanceNamesList[i], experimentsDetails.GCPProjectID, instanceZonesList[i]); err != nil { - return errors.Errorf("unable to start %s vm instance, err: %v", instanceNamesList[i], err) + return stacktrace.Propagate(err, "unable to start vm instance") } default: @@ -144,7 +146,7 @@ func injectChaosInSerialMode(computeService *compute.Service, experimentsDetails // wait for VM instance to get in running state log.Infof("[Wait]: Wait for VM instance %s to get in running state", instanceNamesList[i]) if err := gcplib.WaitForVMInstanceUp(computeService, experimentsDetails.Timeout, experimentsDetails.Delay, instanceNamesList[i], experimentsDetails.GCPProjectID, instanceZonesList[i]); err != nil { - return errors.Errorf("unable to start %s vm instance, err: %v", instanceNamesList[i], err) + return stacktrace.Propagate(err, "unable to start vm instance") } } @@ -186,7 +188,7 @@ func injectChaosInParallelMode(computeService *compute.Service, experimentsDetai // stopping the VM instance log.Infof("[Chaos]: Stopping %s VM instance", instanceNamesList[i]) if err := gcplib.VMInstanceStop(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, instanceZonesList[i]); err != nil { - return errors.Errorf("%s vm instance failed to stop, err: %v", instanceNamesList[i], err) + return stacktrace.Propagate(err, "vm instance failed to stop") } common.SetTargets(instanceNamesList[i], "injected", "VM", chaosDetails) @@ -197,7 +199,7 @@ func injectChaosInParallelMode(computeService *compute.Service, experimentsDetai // wait for VM instance to completely stop log.Infof("[Wait]: Wait for VM instance %s to get in stopped state", instanceNamesList[i]) if err := gcplib.WaitForVMInstanceDown(computeService, experimentsDetails.Timeout, experimentsDetails.Delay, instanceNamesList[i], experimentsDetails.GCPProjectID, instanceZonesList[i]); err != nil { - return errors.Errorf("%s vm instance failed to fully shutdown, err: %v", instanceNamesList[i], err) + return stacktrace.Propagate(err, "vm instance failed to fully shutdown") } } @@ -219,7 +221,7 @@ func injectChaosInParallelMode(computeService *compute.Service, experimentsDetai for i := range instanceNamesList { log.Infof("[Chaos]: Starting back %s VM instance", instanceNamesList[i]) if err := gcplib.VMInstanceStart(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, instanceZonesList[i]); err != nil { - return errors.Errorf("%s vm instance failed to start, err: %v", instanceNamesList[i], err) + return stacktrace.Propagate(err, "vm instance failed to start") } } @@ -228,7 +230,7 @@ func injectChaosInParallelMode(computeService *compute.Service, experimentsDetai log.Infof("[Wait]: Wait for VM instance %s to get in running state", 
instanceNamesList[i]) if err := gcplib.WaitForVMInstanceUp(computeService, experimentsDetails.Timeout, experimentsDetails.Delay, instanceNamesList[i], experimentsDetails.GCPProjectID, instanceZonesList[i]); err != nil { - return errors.Errorf("unable to start %s vm instance, err: %v", instanceNamesList[i], err) + return stacktrace.Propagate(err, "unable to start vm instance") } common.SetTargets(instanceNamesList[i], "reverted", "VM", chaosDetails) @@ -241,7 +243,7 @@ func injectChaosInParallelMode(computeService *compute.Service, experimentsDetai log.Infof("[Wait]: Wait for VM instance %s to get in running state", instanceNamesList[i]) if err := gcplib.WaitForVMInstanceUp(computeService, experimentsDetails.Timeout, experimentsDetails.Delay, instanceNamesList[i], experimentsDetails.GCPProjectID, instanceZonesList[i]); err != nil { - return errors.Errorf("unable to start %s vm instance, err: %v", instanceNamesList[i], err) + return stacktrace.Propagate(err, "unable to start vm instance") } common.SetTargets(instanceNamesList[i], "reverted", "VM", chaosDetails) @@ -267,20 +269,20 @@ func abortWatcher(computeService *compute.Service, experimentsDetails *experimen instanceState, err := gcplib.GetVMInstanceStatus(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, zonesList[i]) if err != nil { - log.Errorf("failed to get %s vm instance status when an abort signal is received, err: %v", instanceNamesList[i], err) + log.Errorf("Failed to get %s vm instance status when an abort signal is received, err: %v", instanceNamesList[i], err) } if instanceState != "RUNNING" { log.Infof("[Abort]: Waiting for %s VM instance to shut down", instanceNamesList[i]) if err := gcplib.WaitForVMInstanceDown(computeService, experimentsDetails.Timeout, experimentsDetails.Delay, instanceNamesList[i], experimentsDetails.GCPProjectID, zonesList[i]); err != nil { - log.Errorf("unable to wait till stop of the instance, err: %v", err) + log.Errorf("Unable to wait till stop of %s instance, err: %v", instanceNamesList[i], err) } log.Infof("[Abort]: Starting %s VM instance as abort signal is received", instanceNamesList[i]) err := gcplib.VMInstanceStart(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, zonesList[i]) if err != nil { - log.Errorf("%s vm instance failed to start when an abort signal is received, err: %v", instanceNamesList[i], err) + log.Errorf("%s VM instance failed to start when an abort signal is received, err: %v", instanceNamesList[i], err) } } diff --git a/chaoslib/litmus/http-chaos/helper/http-helper.go b/chaoslib/litmus/http-chaos/helper/http-helper.go index ec51e1718..43f339df3 100644 --- a/chaoslib/litmus/http-chaos/helper/http-helper.go +++ b/chaoslib/litmus/http-chaos/helper/http-helper.go @@ -1,10 +1,10 @@ package helper import ( - "bytes" "fmt" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/palantir/stacktrace" "os" - "os/exec" "os/signal" "strconv" "strings" @@ -18,7 +18,6 @@ import ( "github.com/litmuschaos/litmus-go/pkg/result" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" - "github.com/pkg/errors" clientTypes "k8s.io/apimachinery/pkg/types" ) @@ -49,10 +48,11 @@ func Helper(clients clients.ClientSets) { log.Info("[PreReq]: Getting the ENV variables") getENV(&experimentsDetails) - // Intialise the chaos attributes + // Initialise the chaos attributes types.InitialiseChaosVariables(&chaosDetails) + chaosDetails.Phase = types.ChaosInjectPhase - // Intialise Chaos Result Parameters + // 
Initialise Chaos Result Parameters types.SetResultAttributes(&resultDetails, chaosDetails) // Set the chaos result uid @@ -60,17 +60,20 @@ func Helper(clients clients.ClientSets) { err := prepareK8sHttpChaos(&experimentsDetails, clients, &eventsDetails, &chaosDetails, &resultDetails) if err != nil { + // update failstep inside chaosresult + if resultErr := result.UpdateFailedStepFromHelper(&resultDetails, &chaosDetails, clients, err); resultErr != nil { + log.Fatalf("helper pod failed, err: %v, resultErr: %v", err, resultErr) + } log.Fatalf("helper pod failed, err: %v", err) } - } -// prepareK8sHttpChaos contains the prepration steps before chaos injection +// prepareK8sHttpChaos contains the preparation steps before chaos injection func prepareK8sHttpChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails) error { - targetList, err := common.ParseTargets() + targetList, err := common.ParseTargets(chaosDetails.ChaosPodName) if err != nil { - return err + return stacktrace.Propagate(err, "could not parse targets") } var targets []targetDetails @@ -80,17 +83,18 @@ func prepareK8sHttpChaos(experimentsDetails *experimentTypes.ExperimentDetails, Name: t.Name, Namespace: t.Namespace, TargetContainer: t.TargetContainer, + Source: chaosDetails.ChaosPodName, } - td.ContainerId, err = common.GetRuntimeBasedContainerID(experimentsDetails.ContainerRuntime, experimentsDetails.SocketPath, td.Name, td.Namespace, td.TargetContainer, clients) + td.ContainerId, err = common.GetRuntimeBasedContainerID(experimentsDetails.ContainerRuntime, experimentsDetails.SocketPath, td.Name, td.Namespace, td.TargetContainer, clients, td.Source) if err != nil { - return err + return stacktrace.Propagate(err, "could not get container id") } // extract out the pid of the target container - td.Pid, err = common.GetPauseAndSandboxPID(experimentsDetails.ContainerRuntime, td.ContainerId, experimentsDetails.SocketPath) + td.Pid, err = common.GetPauseAndSandboxPID(experimentsDetails.ContainerRuntime, td.ContainerId, experimentsDetails.SocketPath, td.Source) if err != nil { - return err + return stacktrace.Propagate(err, "could not get container pid") } targets = append(targets, td) } @@ -108,14 +112,14 @@ func prepareK8sHttpChaos(experimentsDetails *experimentTypes.ExperimentDetails, for _, t := range targets { // injecting http chaos inside target container if err = injectChaos(experimentsDetails, t); err != nil { - return err + return stacktrace.Propagate(err, "could not inject chaos") } log.Infof("successfully injected chaos on target: {name: %s, namespace: %v, container: %v}", t.Name, t.Namespace, t.TargetContainer) if err = result.AnnotateChaosResult(resultDetails.Name, chaosDetails.ChaosNamespace, "injected", "pod", t.Name); err != nil { - if revertErr := revertChaos(experimentsDetails, t); err != nil { - return fmt.Errorf("failed to revert and annotate the result, err: %v", fmt.Sprintf("%s, %s", err.Error(), revertErr.Error())) + if revertErr := revertChaos(experimentsDetails, t); revertErr != nil { + return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s,%s]", stacktrace.RootCause(err).Error(), stacktrace.RootCause(revertErr).Error())} } - return fmt.Errorf("failed to annotate the result, err: %v", err) + return stacktrace.Propagate(err, "could not annotate chaosresult") } } @@ -134,7 +138,7 @@ func prepareK8sHttpChaos(experimentsDetails *experimentTypes.ExperimentDetails, var errList 
[]string for _, t := range targets { - // cleaning the netem process after chaos injection + // cleaning the ip rules process after chaos injection err := revertChaos(experimentsDetails, t) if err != nil { errList = append(errList, err.Error()) @@ -146,7 +150,7 @@ func prepareK8sHttpChaos(experimentsDetails *experimentTypes.ExperimentDetails, } if len(errList) != 0 { - return fmt.Errorf("failed to revert chaos, err: %v", strings.Join(errList, ",")) + return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s]", strings.Join(errList, ","))} } return nil } @@ -154,12 +158,18 @@ func prepareK8sHttpChaos(experimentsDetails *experimentTypes.ExperimentDetails, // injectChaos inject the http chaos in target container and add ruleset to the iptables to redirect the ports func injectChaos(experimentDetails *experimentTypes.ExperimentDetails, t targetDetails) error { if err := startProxy(experimentDetails, t.Pid); err != nil { - _ = killProxy(t.Pid) - return errors.Errorf("failed to start proxy, err: %v", err) + killErr := killProxy(t.Pid, t.Source) + if killErr != nil { + return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s,%s]", stacktrace.RootCause(err).Error(), stacktrace.RootCause(killErr).Error())} + } + return stacktrace.Propagate(err, "could not start proxy server") } if err := addIPRuleSet(experimentDetails, t.Pid); err != nil { - _ = killProxy(t.Pid) - return errors.Errorf("failed to add ip rule set, err: %v", err) + killErr := killProxy(t.Pid, t.Source) + if killErr != nil { + return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s,%s]", stacktrace.RootCause(err).Error(), stacktrace.RootCause(killErr).Error())} + } + return stacktrace.Propagate(err, "could not add ip rules") } return nil } @@ -167,21 +177,17 @@ func injectChaos(experimentDetails *experimentTypes.ExperimentDetails, t targetD // revertChaos revert the http chaos in target container func revertChaos(experimentDetails *experimentTypes.ExperimentDetails, t targetDetails) error { - var revertError error + var errList []string if err := removeIPRuleSet(experimentDetails, t.Pid); err != nil { - revertError = errors.Errorf("failed to remove ip rule set, err: %v", err) + errList = append(errList, err.Error()) } - if err := killProxy(t.Pid); err != nil { - if revertError != nil { - revertError = errors.Errorf("%v and failed to kill proxy server, err: %v", revertError, err) - } else { - revertError = errors.Errorf("failed to kill proxy server, err: %v", err) - } + if err := killProxy(t.Pid, t.Source); err != nil { + errList = append(errList, err.Error()) } - if revertError != nil { - return revertError + if len(errList) != 0 { + return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s]", strings.Join(errList, ","))} } log.Infof("successfully reverted chaos on target: {name: %s, namespace: %v, container: %v}", t.Name, t.Namespace, t.TargetContainer) return nil @@ -205,7 +211,7 @@ func startProxy(experimentDetails *experimentTypes.ExperimentDetails, pid int) e log.Infof("[Chaos]: Starting proxy server") - if err := runCommand(chaosCommand); err != nil { + if err := common.RunBashCommand(chaosCommand, "failed to start proxy server", experimentDetails.ChaosPodName); err != nil { return err } @@ -218,11 +224,11 @@ const NoProxyToKill = "you need to specify whom to kill" // killProxy kills the proxy process inside the target container // it is using nsenter command to enter into network namespace of target container // and execute the proxy related command inside it. 
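// The hunks above swap the helper-local runCommand for a shared
// common.RunBashCommand(command, failMsg, source) helper whose body is not part of this
// diff. As a rough sketch of the assumed contract only (run the command via `/bin/bash -c`;
// on failure, surface stderr together with the caller-supplied failure message and the
// source helper pod), something along these lines would satisfy the call sites shown here:

package common

import (
	"bytes"
	"fmt"
	"os/exec"
	"strings"

	"github.com/litmuschaos/litmus-go/pkg/cerrors"
)

// RunBashCommand executes the given command with `/bin/bash -c` and wraps any
// failure with the supplied failure message and source (helper pod name).
func RunBashCommand(command, failMsg, source string) error {
	var stdout, stderr bytes.Buffer

	cmd := exec.Command("/bin/bash", "-c", command)
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr

	if err := cmd.Run(); err != nil {
		reason := err.Error()
		if s := strings.TrimSpace(stderr.String()); s != "" {
			reason = s // prefer the command's own stderr when it is available
		}
		// the concrete error type is an assumption; the patch's cerrors.Error
		// carries an ErrorCode, the originating Source, and a Reason
		return cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: source, Reason: fmt.Sprintf("%s: %s", failMsg, reason)}
	}
	return nil
}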
-func killProxy(pid int) error { +func killProxy(pid int, source string) error { stopProxyServerCommand := fmt.Sprintf("sudo nsenter -t %d -n sudo kill -9 $(ps aux | grep [t]oxiproxy | awk 'FNR==1{print $1}')", pid) log.Infof("[Chaos]: Stopping proxy server") - if err := runCommand(stopProxyServerCommand); err != nil { + if err := common.RunBashCommand(stopProxyServerCommand, "failed to stop proxy server", source); err != nil { return err } @@ -240,7 +246,7 @@ func addIPRuleSet(experimentDetails *experimentTypes.ExperimentDetails, pid int) addIPRuleSetCommand := fmt.Sprintf("(sudo nsenter -t %d -n iptables -t nat -I PREROUTING -i %v -p tcp --dport %d -j REDIRECT --to-port %d)", pid, experimentDetails.NetworkInterface, experimentDetails.TargetServicePort, experimentDetails.ProxyPort) log.Infof("[Chaos]: Adding IPtables ruleset") - if err := runCommand(addIPRuleSetCommand); err != nil { + if err := common.RunBashCommand(addIPRuleSetCommand, "failed to add ip rules", experimentDetails.ChaosPodName); err != nil { return err } @@ -257,7 +263,7 @@ func removeIPRuleSet(experimentDetails *experimentTypes.ExperimentDetails, pid i removeIPRuleSetCommand := fmt.Sprintf("sudo nsenter -t %d -n iptables -t nat -D PREROUTING -i %v -p tcp --dport %d -j REDIRECT --to-port %d", pid, experimentDetails.NetworkInterface, experimentDetails.TargetServicePort, experimentDetails.ProxyPort) log.Infof("[Chaos]: Removing IPtables ruleset") - if err := runCommand(removeIPRuleSetCommand); err != nil { + if err := common.RunBashCommand(removeIPRuleSetCommand, "failed to remove ip rules", experimentDetails.ChaosPodName); err != nil { return err } @@ -282,25 +288,6 @@ func getENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.Toxicity, _ = strconv.Atoi(types.Getenv("TOXICITY", "100")) } -func runCommand(chaosCommand string) error { - var stdout, stderr bytes.Buffer - - cmd := exec.Command("/bin/bash", "-c", chaosCommand) - cmd.Stdout = &stdout - cmd.Stderr = &stderr - err = cmd.Run() - errStr := stderr.String() - if err != nil { - // if we get standard error then, return the same - if errStr != "" { - return errors.New(errStr) - } - // if not standard error found, return error - return err - } - return nil -} - // abortWatcher continuously watch for the abort signals func abortWatcher(targets []targetDetails, resultName, chaosNS string, experimentDetails *experimentTypes.ExperimentDetails) { @@ -336,4 +323,5 @@ type targetDetails struct { TargetContainer string ContainerId string Pid int + Source string } diff --git a/chaoslib/litmus/http-chaos/lib/http-chaos.go b/chaoslib/litmus/http-chaos/lib/http-chaos.go index 02159fedb..f3ef4defc 100644 --- a/chaoslib/litmus/http-chaos/lib/http-chaos.go +++ b/chaoslib/litmus/http-chaos/lib/http-chaos.go @@ -3,6 +3,8 @@ package lib import ( "context" "fmt" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/palantir/stacktrace" "strconv" "strings" @@ -13,7 +15,6 @@ import ( "github.com/litmuschaos/litmus-go/pkg/status" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" - "github.com/pkg/errors" "github.com/sirupsen/logrus" apiv1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -26,14 +27,14 @@ func PrepareAndInjectChaos(experimentsDetails *experimentTypes.ExperimentDetails // Get the target pod details for the chaos execution // if the target pod is not defined it will derive the random target pod list using pod affected percentage if experimentsDetails.TargetPods == "" && 
chaosDetails.AppDetail == nil { - return errors.Errorf("please provide one of the appLabel or TARGET_PODS") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "provide one of the appLabel or TARGET_PODS"} } //set up the tunables if provided in range SetChaosTunables(experimentsDetails) targetPodList, err := common.GetTargetPods(experimentsDetails.NodeLabel, experimentsDetails.TargetPods, experimentsDetails.PodsAffectedPerc, clients, chaosDetails) if err != nil { - return err + return stacktrace.Propagate(err, "could not get target pods") } //Waiting for the ramp time before chaos injection @@ -46,29 +47,29 @@ func PrepareAndInjectChaos(experimentsDetails *experimentTypes.ExperimentDetails if experimentsDetails.ChaosServiceAccount == "" { experimentsDetails.ChaosServiceAccount, err = common.GetServiceAccount(experimentsDetails.ChaosNamespace, experimentsDetails.ChaosPodName, clients) if err != nil { - return errors.Errorf("unable to get the serviceAccountName, err: %v", err) + return stacktrace.Propagate(err, "could not get experiment service account") } } if experimentsDetails.EngineName != "" { if err := common.SetHelperData(chaosDetails, experimentsDetails.SetHelperData, clients); err != nil { - return err + return stacktrace.Propagate(err, "could not set helper data") } } - experimentsDetails.IsTargetContainerProvided = (experimentsDetails.TargetContainer != "") + experimentsDetails.IsTargetContainerProvided = experimentsDetails.TargetContainer != "" switch strings.ToLower(experimentsDetails.Sequence) { case "serial": if err = injectChaosInSerialMode(experimentsDetails, targetPodList, args, clients, chaosDetails, resultDetails, eventsDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": if err = injectChaosInParallelMode(experimentsDetails, targetPodList, args, clients, chaosDetails, resultDetails, eventsDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.Sequence) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)} } return nil @@ -77,7 +78,6 @@ func PrepareAndInjectChaos(experimentsDetails *experimentTypes.ExperimentDetails // injectChaosInSerialMode inject the http chaos in all target application serially (one by one) func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, args string, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { - var err error // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { @@ -90,10 +90,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai //Get the target container name of the application pod if !experimentsDetails.IsTargetContainerProvided { - experimentsDetails.TargetContainer, err = common.GetTargetContainer(pod.Namespace, pod.Name, clients) - if err != nil { - return errors.Errorf("unable to get the target container name, err: %v", err) - } + experimentsDetails.TargetContainer = pod.Spec.Containers[0].Name } log.InfoWithValues("[Info]: Details of application under chaos injection", logrus.Fields{ @@ -104,7 +101,7 @@ func 
injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai runID := common.GetRunID() if err := createHelperPod(experimentsDetails, clients, chaosDetails, fmt.Sprintf("%s:%s:%s", pod.Name, pod.Namespace, experimentsDetails.TargetContainer), pod.Spec.NodeName, runID, args); err != nil { - return errors.Errorf("unable to create the helper pod, err: %v", err) + return stacktrace.Propagate(err, "could not create helper pod") } appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID) @@ -113,7 +110,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai log.Info("[Status]: Checking the status of the helper pods") if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return errors.Errorf("helper pods are not in running state, err: %v", err) + return stacktrace.Propagate(err, "could not check helper status") } // Wait till the completion of the helper pod @@ -122,13 +119,13 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, experimentsDetails.ExperimentName) if err != nil || podStatus == "Failed" { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return common.HelperFailedError(err) + return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, true) } //Deleting all the helper pod for http chaos log.Info("[Cleanup]: Deleting the helper pod") if err := common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return errors.Errorf("unable to delete the helper pods, err: %v", err) + return stacktrace.Propagate(err, "could not delete helper pod(s)") } } @@ -155,7 +152,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet } if err := createHelperPod(experimentsDetails, clients, chaosDetails, strings.Join(targetsPerNode, ";"), node, runID, args); err != nil { - return errors.Errorf("unable to create the helper pod, err: %v", err) + return stacktrace.Propagate(err, "could not create helper pod") } } @@ -165,7 +162,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet log.Info("[Status]: Checking the status of the helper pods") if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return errors.Errorf("helper pods are not in running state, err: %v", err) + return stacktrace.Propagate(err, "could not check helper status") } // Wait till the completion of the helper pod @@ -174,13 +171,13 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, experimentsDetails.ExperimentName) if err != nil || podStatus == "Failed" { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return common.HelperFailedError(err) + return common.HelperFailedError(err, appLabel, 
chaosDetails.ChaosNamespace, true) } // Deleting all the helper pod for http chaos log.Info("[Cleanup]: Deleting all the helper pod") if err := common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return errors.Errorf("unable to delete the helper pods, err: %v", err) + return stacktrace.Propagate(err, "could not delete helper pod(s)") } return nil @@ -252,8 +249,10 @@ func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clie } _, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Create(context.Background(), helperPod, v1.CreateOptions{}) - return err - + if err != nil { + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("unable to create helper pod: %s", err.Error())} + } + return nil } // getPodEnv derive all the env required for the helper pod @@ -278,8 +277,8 @@ func getPodEnv(experimentsDetails *experimentTypes.ExperimentDetails, targets, a return envDetails.ENV } -//SetChaosTunables will setup a random value within a given range of values -//If the value is not provided in range it'll setup the initial provided value. +//SetChaosTunables will set up a random value within a given range of values +//If the value is not provided in range it'll set up the initial provided value. func SetChaosTunables(experimentsDetails *experimentTypes.ExperimentDetails) { experimentsDetails.PodsAffectedPerc = common.ValidateRange(experimentsDetails.PodsAffectedPerc) experimentsDetails.Sequence = common.GetRandomSequence(experimentsDetails.Sequence) diff --git a/chaoslib/litmus/http-chaos/lib/statuscode/status-code.go b/chaoslib/litmus/http-chaos/lib/statuscode/status-code.go index abcff3fd5..be541f7f1 100644 --- a/chaoslib/litmus/http-chaos/lib/statuscode/status-code.go +++ b/chaoslib/litmus/http-chaos/lib/statuscode/status-code.go @@ -2,6 +2,7 @@ package statuscode import ( "fmt" + "github.com/litmuschaos/litmus-go/pkg/cerrors" "math" "math/rand" "strconv" @@ -14,7 +15,6 @@ import ( experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/types" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/types" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -71,11 +71,11 @@ func GetStatusCode(statusCode string) (string, error) { } else { acceptedCodes := getAcceptedCodesInList(statusCodeList, acceptedStatusCodes) if len(acceptedCodes) == 0 { - return "", errors.Errorf("invalid status code provided, code: %s", statusCode) + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("invalid status code: %s", statusCode)} } return acceptedCodes[rand.Intn(len(acceptedCodes))], nil } - return "", errors.Errorf("status code %s is not supported. \nList of supported status codes: %v", statusCode, acceptedStatusCodes) + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("status code '%s' is not supported. 
Supported status codes are: %v", statusCode, acceptedStatusCodes)} } // getAcceptedCodesInList returns the list of accepted status codes from a list of status codes diff --git a/chaoslib/litmus/kafka-broker-pod-failure/lib/pod-delete.go b/chaoslib/litmus/kafka-broker-pod-failure/lib/pod-delete.go index 978afb6c7..9a78b1896 100644 --- a/chaoslib/litmus/kafka-broker-pod-failure/lib/pod-delete.go +++ b/chaoslib/litmus/kafka-broker-pod-failure/lib/pod-delete.go @@ -2,11 +2,14 @@ package lib import ( "context" + "fmt" "strconv" "strings" "time" + "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/workloads" + "github.com/palantir/stacktrace" clients "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" @@ -16,12 +19,11 @@ import ( "github.com/litmuschaos/litmus-go/pkg/status" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" - "github.com/pkg/errors" "github.com/sirupsen/logrus" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -//PreparePodDelete contains the prepration steps before chaos injection +// PreparePodDelete contains the prepration steps before chaos injection func PreparePodDelete(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { //Waiting for the ramp time before chaos injection @@ -33,14 +35,14 @@ func PreparePodDelete(experimentsDetails *experimentTypes.ExperimentDetails, cli switch strings.ToLower(experimentsDetails.ChaoslibDetail.Sequence) { case "serial": if err := injectChaosInSerialMode(experimentsDetails, clients, chaosDetails, eventsDetails, resultDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": if err := injectChaosInParallelMode(experimentsDetails, clients, chaosDetails, eventsDetails, resultDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.ChaoslibDetail.Sequence) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.ChaoslibDetail.Sequence)} } //Waiting for the ramp time after chaos injection @@ -70,7 +72,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai // Get the target pod details for the chaos execution // if the target pod is not defined it will derive the random target pod list using pod affected percentage if experimentsDetails.KafkaBroker == "" && chaosDetails.AppDetail == nil { - return errors.Errorf("please provide one of the appLabel or KAFKA_BROKER") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "please provide one of the appLabel or KAFKA_BROKER"} } podsAffectedPerc, _ := strconv.Atoi(experimentsDetails.ChaoslibDetail.PodsAffectedPerc) @@ -109,13 +111,13 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai err = clients.KubeClient.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, v1.DeleteOptions{}) } if err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: fmt.Sprintf("failed to delete the target pod: %s", err.Error())} } switch chaosDetails.Randomness { case true: if 
err := common.RandomInterval(experimentsDetails.ChaoslibDetail.ChaosInterval); err != nil { - return err + return stacktrace.Propagate(err, "could not get random chaos interval") } default: //Waiting for the chaos interval after chaos injection @@ -135,7 +137,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai Namespace: parent.Namespace, } if err = status.CheckUnTerminatedPodStatusesByWorkloadName(target, experimentsDetails.ChaoslibDetail.Timeout, experimentsDetails.ChaoslibDetail.Delay, clients); err != nil { - return err + return stacktrace.Propagate(err, "could not check pod statuses by workload names") } } } @@ -165,19 +167,19 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet // Get the target pod details for the chaos execution // if the target pod is not defined it will derive the random target pod list using pod affected percentage if experimentsDetails.KafkaBroker == "" && chaosDetails.AppDetail == nil { - return errors.Errorf("please provide one of the appLabel or KAFKA_BROKER") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "please provide one of the appLabel or KAFKA_BROKER"} } podsAffectedPerc, _ := strconv.Atoi(experimentsDetails.ChaoslibDetail.PodsAffectedPerc) targetPodList, err := common.GetPodList(experimentsDetails.KafkaBroker, podsAffectedPerc, clients, chaosDetails) if err != nil { - return err + return stacktrace.Propagate(err, "could not get target pods") } // deriving the parent name of the target resources for _, pod := range targetPodList.Items { kind, parentName, err := workloads.GetPodOwnerTypeAndName(&pod, clients.DynamicClient) if err != nil { - return err + return stacktrace.Propagate(err, "could not get pod owner name and kind") } common.SetParentName(parentName, kind, pod.Namespace, chaosDetails) } @@ -203,14 +205,14 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet err = clients.KubeClient.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, v1.DeleteOptions{}) } if err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: fmt.Sprintf("failed to delete the target pod: %s", err.Error())} } } switch chaosDetails.Randomness { case true: if err := common.RandomInterval(experimentsDetails.ChaoslibDetail.ChaosInterval); err != nil { - return err + return stacktrace.Propagate(err, "could not get random chaos interval") } default: //Waiting for the chaos interval after chaos injection @@ -230,7 +232,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet Namespace: parent.Namespace, } if err = status.CheckUnTerminatedPodStatusesByWorkloadName(target, experimentsDetails.ChaoslibDetail.Timeout, experimentsDetails.ChaoslibDetail.Delay, clients); err != nil { - return err + return stacktrace.Propagate(err, "could not check pod statuses by workload names") } } diff --git a/chaoslib/litmus/kubelet-service-kill/lib/kubelet-service-kill.go b/chaoslib/litmus/kubelet-service-kill/lib/kubelet-service-kill.go index 9af6a135c..dd6304680 100644 --- a/chaoslib/litmus/kubelet-service-kill/lib/kubelet-service-kill.go +++ b/chaoslib/litmus/kubelet-service-kill/lib/kubelet-service-kill.go @@ -3,6 +3,8 @@ package lib import ( "context" "fmt" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/palantir/stacktrace" "strconv" clients "github.com/litmuschaos/litmus-go/pkg/clients" @@ 
-13,7 +15,6 @@ import ( "github.com/litmuschaos/litmus-go/pkg/status" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" - "github.com/pkg/errors" "github.com/sirupsen/logrus" apiv1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -27,7 +28,7 @@ func PrepareKubeletKill(experimentsDetails *experimentTypes.ExperimentDetails, c //Select node for kubelet-service-kill experimentsDetails.TargetNode, err = common.GetNodeName(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.NodeLabel, clients) if err != nil { - return err + return stacktrace.Propagate(err, "could not get node name") } } @@ -51,13 +52,13 @@ func PrepareKubeletKill(experimentsDetails *experimentTypes.ExperimentDetails, c if experimentsDetails.EngineName != "" { if err := common.SetHelperData(chaosDetails, experimentsDetails.SetHelperData, clients); err != nil { - return err + return stacktrace.Propagate(err, "could not set helper data") } } // Creating the helper pod to perform node memory hog if err = createHelperPod(experimentsDetails, clients, chaosDetails, experimentsDetails.TargetNode); err != nil { - return errors.Errorf("unable to create the helper pod, err: %v", err) + return stacktrace.Propagate(err, "could not create helper pod") } appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, experimentsDetails.RunID) @@ -66,7 +67,7 @@ func PrepareKubeletKill(experimentsDetails *experimentTypes.ExperimentDetails, c log.Info("[Status]: Checking the status of the helper pod") if err = status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { common.DeleteHelperPodBasedOnJobCleanupPolicy(experimentsDetails.ExperimentName+"-helper-"+experimentsDetails.RunID, appLabel, chaosDetails, clients) - return errors.Errorf("helper pod is not in running state, err: %v", err) + return stacktrace.Propagate(err, "could not check helper status") } common.SetTargets(experimentsDetails.TargetNode, "targeted", "node", chaosDetails) @@ -83,7 +84,7 @@ func PrepareKubeletKill(experimentsDetails *experimentTypes.ExperimentDetails, c log.Info("[Status]: Check for the node to be in NotReady state") if err = status.CheckNodeNotReadyState(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { common.DeleteHelperPodBasedOnJobCleanupPolicy(experimentsDetails.ExperimentName+"-helper-"+experimentsDetails.RunID, appLabel, chaosDetails, clients) - return errors.Errorf("application node is not in NotReady state, err: %v", err) + return stacktrace.Propagate(err, "could not check for NOT READY state") } // Wait till the completion of helper pod @@ -92,13 +93,13 @@ func PrepareKubeletKill(experimentsDetails *experimentTypes.ExperimentDetails, c podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, experimentsDetails.ExperimentName) if err != nil || podStatus == "Failed" { common.DeleteHelperPodBasedOnJobCleanupPolicy(experimentsDetails.ExperimentName+"-helper-"+experimentsDetails.RunID, appLabel, chaosDetails, clients) - return common.HelperFailedError(err) + return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, false) } //Deleting the helper pod log.Info("[Cleanup]: Deleting the helper pod") if err = common.DeletePod(experimentsDetails.ExperimentName+"-helper-"+experimentsDetails.RunID, 
appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return errors.Errorf("unable to delete the helper pod, err: %v", err) + return stacktrace.Propagate(err, "could not delete helper pod") } //Waiting for the ramp time after chaos injection @@ -192,7 +193,10 @@ func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clie } _, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Create(context.Background(), helperPod, v1.CreateOptions{}) - return err + if err != nil { + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("unable to create helper pod: %s", err.Error())} + } + return nil } func ptrint64(p int64) *int64 { diff --git a/chaoslib/litmus/network-chaos/helper/netem.go b/chaoslib/litmus/network-chaos/helper/netem.go index dd78a0173..7dfadc875 100644 --- a/chaoslib/litmus/network-chaos/helper/netem.go +++ b/chaoslib/litmus/network-chaos/helper/netem.go @@ -2,7 +2,9 @@ package helper import ( "fmt" + "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/events" + "github.com/palantir/stacktrace" "os" "os/exec" "os/signal" @@ -54,10 +56,11 @@ func Helper(clients clients.ClientSets) { log.Info("[PreReq]: Getting the ENV variables") getENV(&experimentsDetails) - // Intialise the chaos attributes + // Initialise the chaos attributes types.InitialiseChaosVariables(&chaosDetails) + chaosDetails.Phase = types.ChaosInjectPhase - // Intialise Chaos Result Parameters + // Initialise Chaos Result Parameters types.SetResultAttributes(&resultDetails, chaosDetails) // Set the chaos result uid @@ -65,6 +68,10 @@ func Helper(clients clients.ClientSets) { err := preparePodNetworkChaos(&experimentsDetails, clients, &eventsDetails, &chaosDetails, &resultDetails) if err != nil { + // update failstep inside chaosresult + if resultErr := result.UpdateFailedStepFromHelper(&resultDetails, &chaosDetails, clients, err); resultErr != nil { + log.Fatalf("helper pod failed, err: %v, resultErr: %v", err, resultErr) + } log.Fatalf("helper pod failed, err: %v", err) } @@ -75,7 +82,7 @@ func preparePodNetworkChaos(experimentsDetails *experimentTypes.ExperimentDetail targetEnv := os.Getenv("TARGETS") if targetEnv == "" { - return fmt.Errorf("no target found, provide atleast one target") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: chaosDetails.ChaosPodName, Reason: "no target found, provide atleast one target"} } var targets []targetDetails @@ -83,24 +90,25 @@ func preparePodNetworkChaos(experimentsDetails *experimentTypes.ExperimentDetail for _, t := range strings.Split(targetEnv, ";") { target := strings.Split(t, ":") if len(target) != 4 { - return fmt.Errorf("unsupported target: '%v', provide target in ':::", target) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: chaosDetails.ChaosPodName, Reason: fmt.Sprintf("unsupported target format: '%v'", targets)} } td := targetDetails{ Name: target[0], Namespace: target[1], TargetContainer: target[2], DestinationIps: getDestIps(target[3]), + Source: chaosDetails.ChaosPodName, } - td.ContainerId, err = common.GetRuntimeBasedContainerID(experimentsDetails.ContainerRuntime, experimentsDetails.SocketPath, td.Name, td.Namespace, td.TargetContainer, clients) + td.ContainerId, err = common.GetRuntimeBasedContainerID(experimentsDetails.ContainerRuntime, experimentsDetails.SocketPath, td.Name, td.Namespace, td.TargetContainer, clients, td.Source) if err != nil { - return err + return 
stacktrace.Propagate(err, "could not get container id") } // extract out the pid of the target container - td.Pid, err = common.GetPauseAndSandboxPID(experimentsDetails.ContainerRuntime, td.ContainerId, experimentsDetails.SocketPath) + td.Pid, err = common.GetPauseAndSandboxPID(experimentsDetails.ContainerRuntime, td.ContainerId, experimentsDetails.SocketPath, td.Source) if err != nil { - return err + return stacktrace.Propagate(err, "could not get container pid") } targets = append(targets, td) @@ -119,14 +127,14 @@ func preparePodNetworkChaos(experimentsDetails *experimentTypes.ExperimentDetail for _, t := range targets { // injecting network chaos inside target container if err = injectChaos(experimentsDetails.NetworkInterface, t); err != nil { - return err + return stacktrace.Propagate(err, "could not inject chaos") } log.Infof("successfully injected chaos on target: {name: %s, namespace: %v, container: %v}", t.Name, t.Namespace, t.TargetContainer) if err = result.AnnotateChaosResult(resultDetails.Name, chaosDetails.ChaosNamespace, "injected", "pod", t.Name); err != nil { - if _, killErr := killnetem(t, experimentsDetails.NetworkInterface); killErr != nil { - return fmt.Errorf("unable to revert and annotate chaosresult, err: [%v, %v]", killErr, err) + if _, revertErr := killnetem(t, experimentsDetails.NetworkInterface); revertErr != nil { + return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s,%s]", stacktrace.RootCause(err).Error(), stacktrace.RootCause(revertErr).Error())} } - return err + return stacktrace.Propagate(err, "could not annotate chaosresult") } } @@ -158,9 +166,8 @@ func preparePodNetworkChaos(experimentsDetails *experimentTypes.ExperimentDetail } if len(errList) != 0 { - return fmt.Errorf(" failed to revert chaos, err: %v", strings.Join(errList, ",")) + return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s]", strings.Join(errList, ","))} } - return nil } @@ -173,11 +180,8 @@ func injectChaos(netInterface string, target targetDetails) error { if len(destIps) == 0 && len(sPorts) == 0 && len(dPorts) == 0 { tc := fmt.Sprintf("sudo nsenter -t %d -n tc qdisc replace dev %s root netem %v", target.Pid, netInterface, netemCommands) - cmd := exec.Command("/bin/bash", "-c", tc) - out, err := cmd.CombinedOutput() - log.Info(cmd.String()) - if err != nil { - log.Error(string(out)) + log.Info(tc) + if err := common.RunBashCommand(tc, "failed to create tc rules", target.Source); err != nil { return err } } else { @@ -195,22 +199,16 @@ func injectChaos(netInterface string, target targetDetails) error { // Create a priority-based queue // This instantly creates classes 1:1, 1:2, 1:3 priority := fmt.Sprintf("sudo nsenter -t %v -n tc qdisc replace dev %v root handle 1: prio", target.Pid, netInterface) - cmd := exec.Command("/bin/bash", "-c", priority) - out, err := cmd.CombinedOutput() - log.Info(cmd.String()) - if err != nil { - log.Error(string(out)) + log.Info(priority) + if err := common.RunBashCommand(priority, "failed to create priority-based queue", target.Source); err != nil { return err } // Add queueing discipline for 1:3 class. 
// No traffic is going through 1:3 yet traffic := fmt.Sprintf("sudo nsenter -t %v -n tc qdisc replace dev %v parent 1:3 netem %v", target.Pid, netInterface, netemCommands) - cmd = exec.Command("/bin/bash", "-c", traffic) - out, err = cmd.CombinedOutput() - log.Info(cmd.String()) - if err != nil { - log.Error(string(out)) + log.Info(traffic) + if err := common.RunBashCommand(traffic, "failed to create netem queueing discipline", target.Source); err != nil { return err } @@ -220,11 +218,8 @@ func injectChaos(netInterface string, target targetDetails) error { if strings.Contains(ip, ":") { tc = fmt.Sprintf("sudo nsenter -t %v -n tc filter add dev %v protocol ip parent 1:0 prio 3 u32 match ip6 dst %v flowid 1:3", target.Pid, netInterface, ip) } - cmd = exec.Command("/bin/bash", "-c", tc) - out, err = cmd.CombinedOutput() - log.Info(cmd.String()) - if err != nil { - log.Error(string(out)) + log.Info(tc) + if err := common.RunBashCommand(tc, "failed to create destination ips match filters", target.Source); err != nil { return err } } @@ -232,11 +227,8 @@ func injectChaos(netInterface string, target targetDetails) error { for _, port := range sPorts { //redirect traffic to specific sport through band 3 tc := fmt.Sprintf("sudo nsenter -t %v -n tc filter add dev %v protocol ip parent 1:0 prio 3 u32 match ip sport %v 0xffff flowid 1:3", target.Pid, netInterface, port) - cmd = exec.Command("/bin/bash", "-c", tc) - out, err = cmd.CombinedOutput() - log.Info(cmd.String()) - if err != nil { - log.Error(string(out)) + log.Info(tc) + if err := common.RunBashCommand(tc, "failed to create source ports match filters", target.Source); err != nil { return err } } @@ -244,11 +236,8 @@ func injectChaos(netInterface string, target targetDetails) error { for _, port := range dPorts { //redirect traffic to specific dport through band 3 tc := fmt.Sprintf("sudo nsenter -t %v -n tc filter add dev %v protocol ip parent 1:0 prio 3 u32 match ip dport %v 0xffff flowid 1:3", target.Pid, netInterface, port) - cmd = exec.Command("/bin/bash", "-c", tc) - out, err = cmd.CombinedOutput() - log.Info(cmd.String()) - if err != nil { - log.Error(string(out)) + log.Info(tc) + if err := common.RunBashCommand(tc, "failed to create destination ports match filters", target.Source); err != nil { return err } } @@ -272,8 +261,8 @@ func killnetem(target targetDetails, networkInterface string) (bool, error) { log.Warn("The network chaos process has already been removed") return true, err } - log.Error(string(out)) - return false, err + log.Error(err.Error()) + return false, cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Source: target.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", target.Name, target.Namespace, target.TargetContainer), Reason: fmt.Sprintf("failed to revert network faults: %s", string(out))} } log.Infof("successfully reverted chaos on target: {name: %s, namespace: %v, container: %v}", target.Name, target.Namespace, target.TargetContainer) return true, nil @@ -287,6 +276,7 @@ type targetDetails struct { TargetContainer string ContainerId string Pid int + Source string } //getENV fetches all the env variables from the runner pod diff --git a/chaoslib/litmus/network-chaos/lib/network-chaos.go b/chaoslib/litmus/network-chaos/lib/network-chaos.go index a0e3a9cb0..40ff4b6ab 100644 --- a/chaoslib/litmus/network-chaos/lib/network-chaos.go +++ b/chaoslib/litmus/network-chaos/lib/network-chaos.go @@ -3,6 +3,8 @@ package lib import ( "context" "fmt" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + 
"github.com/palantir/stacktrace" k8serrors "k8s.io/apimachinery/pkg/api/errors" "net" "strconv" @@ -15,7 +17,6 @@ import ( "github.com/litmuschaos/litmus-go/pkg/status" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" - "github.com/pkg/errors" "github.com/sirupsen/logrus" apiv1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -25,22 +26,22 @@ var serviceMesh = []string{"istio", "envoy"} var destIpsSvcMesh string var destIps string -//PrepareAndInjectChaos contains the prepration & injection steps +//PrepareAndInjectChaos contains the preparation & injection steps func PrepareAndInjectChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails, args string) error { var err error // Get the target pod details for the chaos execution // if the target pod is not defined it will derive the random target pod list using pod affected percentage if experimentsDetails.TargetPods == "" && chaosDetails.AppDetail == nil { - return errors.Errorf("please provide one of the appLabel or TARGET_PODS") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "provide one of the appLabel or TARGET_PODS"} } - //setup the tunables if provided in range + //set up the tunables if provided in range SetChaosTunables(experimentsDetails) logExperimentFields(experimentsDetails) targetPodList, err := common.GetTargetPods(experimentsDetails.NodeLabel, experimentsDetails.TargetPods, experimentsDetails.PodsAffectedPerc, clients, chaosDetails) if err != nil { - return err + return stacktrace.Propagate(err, "could not get target pods") } //Waiting for the ramp time before chaos injection @@ -53,28 +54,28 @@ func PrepareAndInjectChaos(experimentsDetails *experimentTypes.ExperimentDetails if experimentsDetails.ChaosServiceAccount == "" { experimentsDetails.ChaosServiceAccount, err = common.GetServiceAccount(experimentsDetails.ChaosNamespace, experimentsDetails.ChaosPodName, clients) if err != nil { - return errors.Errorf("unable to get the serviceAccountName, err: %v", err) + return stacktrace.Propagate(err, "could not experiment service account") } } if experimentsDetails.EngineName != "" { if err := common.SetHelperData(chaosDetails, experimentsDetails.SetHelperData, clients); err != nil { - return err + return stacktrace.Propagate(err, "could not set helper data") } } - experimentsDetails.IsTargetContainerProvided = (experimentsDetails.TargetContainer != "") + experimentsDetails.IsTargetContainerProvided = experimentsDetails.TargetContainer != "" switch strings.ToLower(experimentsDetails.Sequence) { case "serial": if err = injectChaosInSerialMode(experimentsDetails, targetPodList, clients, chaosDetails, args, resultDetails, eventsDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": if err = injectChaosInParallelMode(experimentsDetails, targetPodList, clients, chaosDetails, args, resultDetails, eventsDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.Sequence) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)} } return nil @@ -94,21 +95,18 @@ func injectChaosInSerialMode(experimentsDetails 
*experimentTypes.ExperimentDetai serviceMesh, err := setDestIps(pod, experimentsDetails, clients) if err != nil { - return err + return stacktrace.Propagate(err, "could not set destination ips") } //Get the target container name of the application pod if !experimentsDetails.IsTargetContainerProvided { - experimentsDetails.TargetContainer, err = common.GetTargetContainer(pod.Namespace, pod.Name, clients) - if err != nil { - return errors.Errorf("unable to get the target container name, err: %v", err) - } + experimentsDetails.TargetContainer = pod.Spec.Containers[0].Name } runID := common.GetRunID() if err := createHelperPod(experimentsDetails, clients, chaosDetails, fmt.Sprintf("%s:%s:%s:%s", pod.Name, pod.Namespace, experimentsDetails.TargetContainer, serviceMesh), pod.Spec.NodeName, runID, args); err != nil { - return errors.Errorf("unable to create the helper pod, err: %v", err) + return stacktrace.Propagate(err, "could not create helper pod") } appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID) @@ -117,7 +115,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai log.Info("[Status]: Checking the status of the helper pods") if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return errors.Errorf("helper pods are not in running state, err: %v", err) + return stacktrace.Propagate(err, "could not check helper status") } // Wait till the completion of the helper pod @@ -126,13 +124,13 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, experimentsDetails.ExperimentName) if err != nil || podStatus == "Failed" { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return common.HelperFailedError(err) + return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, true) } //Deleting all the helper pod for network chaos log.Info("[Cleanup]: Deleting the helper pod") if err := common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return errors.Errorf("unable to delete the helper pod, err: %v", err) + return stacktrace.Propagate(err, "could not delete helper pod(s)") } } @@ -152,7 +150,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet targets, err := filterPodsForNodes(targetPodList, experimentsDetails, clients) if err != nil { - return err + return stacktrace.Propagate(err, "could not filter target pods") } runID := common.GetRunID() @@ -164,7 +162,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet } if err := createHelperPod(experimentsDetails, clients, chaosDetails, strings.Join(targetsPerNode, ";"), node, runID, args); err != nil { - return errors.Errorf("unable to create the helper pod, err: %v", err) + return stacktrace.Propagate(err, "could not create helper pod") } } @@ -174,7 +172,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet log.Info("[Status]: Checking the status of the helper pods") if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, 
clients); err != nil { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return errors.Errorf("helper pods are not in running state, err: %v", err) + return stacktrace.Propagate(err, "could not check helper status") } // Wait till the completion of the helper pod @@ -183,13 +181,13 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, experimentsDetails.ExperimentName) if err != nil || podStatus == "Failed" { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return common.HelperFailedError(err) + return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, true) } //Deleting all the helper pod for container-kill chaos log.Info("[Cleanup]: Deleting all the helper pod") if err := common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return errors.Errorf("unable to delete the helper pods, err: %v", err) + return stacktrace.Propagate(err, "could not delete helper pod(s)") } return nil @@ -261,8 +259,10 @@ func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clie } _, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Create(context.Background(), helperPod, v1.CreateOptions{}) - return err - + if err != nil { + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("unable to create helper pod: %s", err.Error())} + } + return nil } // getPodEnv derive all the env required for the helper pod @@ -301,13 +301,13 @@ type target struct { } // GetTargetIps return the comma separated target ips -// It fetch the ips from the target ips (if defined by users) -// it append the ips from the host, if target host is provided +// It fetches the ips from the target ips (if defined by users) +// it appends the ips from the host, if target host is provided func GetTargetIps(targetIPs, targetHosts string, clients clients.ClientSets, serviceMesh bool) (string, error) { ipsFromHost, err := getIpsForTargetHosts(targetHosts, clients, serviceMesh) if err != nil { - return "", err + return "", stacktrace.Propagate(err, "could not get ips from target hosts") } if targetIPs == "" { targetIPs = ipsFromHost @@ -317,12 +317,12 @@ func GetTargetIps(targetIPs, targetHosts string, clients clients.ClientSets, ser return targetIPs, nil } -// it derive the pod ips from the kubernetes service +// it derives the pod ips from the kubernetes service func getPodIPFromService(host string, clients clients.ClientSets) ([]string, error) { var ips []string svcFields := strings.Split(host, ".") if len(svcFields) != 5 { - return ips, fmt.Errorf("provide the valid FQDN for service in '..svc.cluster.local format, host: %v", host) + return ips, cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{host: %s}", host), Reason: "provide the valid FQDN for service in '..svc.cluster.local format"} } svcName, svcNs := svcFields[0], svcFields[1] svc, err := clients.KubeClient.CoreV1().Services(svcNs).Get(context.Background(), svcName, v1.GetOptions{}) @@ -331,17 +331,29 @@ func getPodIPFromService(host string, clients clients.ClientSets) ([]string, err log.Warnf("forbidden - failed to get %v service in %v namespace, err: %v", svcName, svcNs, err) return ips, nil } - return ips, err + return ips, cerrors.Error{ErrorCode: 
cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{serviceName: %s, namespace: %s}", svcName, svcNs), Reason: err.Error()} } + + if svc.Spec.Selector == nil { + return nil, nil + } + var svcSelector string for k, v := range svc.Spec.Selector { - pods, err := clients.KubeClient.CoreV1().Pods(svcNs).List(context.Background(), v1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", k, v)}) - if err != nil { - return ips, err - } - for _, p := range pods.Items { - ips = append(ips, p.Status.PodIP) + if svcSelector == "" { + svcSelector += fmt.Sprintf("%s=%s", k, v) + continue } + svcSelector += fmt.Sprintf(",%s=%s", k, v) } + + pods, err := clients.KubeClient.CoreV1().Pods(svcNs).List(context.Background(), v1.ListOptions{LabelSelector: svcSelector}) + if err != nil { + return ips, cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{svcName: %s,podLabel: %s, namespace: %s}", svcNs, svcSelector, svcNs), Reason: fmt.Sprintf("failed to derive pods from service: %s", err.Error())} + } + for _, p := range pods.Items { + ips = append(ips, p.Status.PodIP) + } + return ips, nil } @@ -358,7 +370,7 @@ func getIpsForTargetHosts(targetHosts string, clients clients.ClientSets, servic if strings.Contains(hosts[i], "svc.cluster.local") && serviceMesh { ips, err := getPodIPFromService(hosts[i], clients) if err != nil { - return "", err + return "", stacktrace.Propagate(err, "could not get pod ips from service") } log.Infof("Host: {%v}, IP address: {%v}", hosts[i], ips) commaSeparatedIPs = append(commaSeparatedIPs, ips...) @@ -386,14 +398,14 @@ func getIpsForTargetHosts(targetHosts string, clients clients.ClientSets, servic } } if len(commaSeparatedIPs) == 0 { - return "", errors.Errorf("provided hosts: {%v} are invalid, unable to resolve", targetHosts) + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("hosts: %s", targetHosts), Reason: "provided hosts are invalid, unable to resolve"} } log.Infof("Injecting chaos on {%v} hosts", finalHosts) return strings.Join(commaSeparatedIPs, ","), nil } -//SetChaosTunables will setup a random value within a given range of values -//If the value is not provided in range it'll setup the initial provided value. +//SetChaosTunables will set up a random value within a given range of values +//If the value is not provided in range it'll set up the initial provided value. 
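// The reworked getPodIPFromService above folds the service's .spec.selector map into one
// comma-separated label selector so that all backing pods are listed with a single call.
// A small standalone sketch of the same idea, using the k8s.io/apimachinery labels helper
// instead of the manual string concatenation (an equivalent alternative, not what the
// patch itself does):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// selector as it would come back from svc.Spec.Selector
	selector := map[string]string{"app": "nginx", "tier": "frontend"}

	// labels.Set(...).String() renders "app=nginx,tier=frontend", which can be
	// passed directly as metav1.ListOptions{LabelSelector: ...} when listing pods
	fmt.Println(labels.Set(selector).String())
}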
func SetChaosTunables(experimentsDetails *experimentTypes.ExperimentDetails) { experimentsDetails.NetworkPacketLossPercentage = common.ValidateRange(experimentsDetails.NetworkPacketLossPercentage) experimentsDetails.NetworkPacketCorruptionPercentage = common.ValidateRange(experimentsDetails.NetworkPacketCorruptionPercentage) @@ -439,7 +451,7 @@ func filterPodsForNodes(targetPodList apiv1.PodList, experimentsDetails *experim for _, pod := range targetPodList.Items { serviceMesh, err := setDestIps(pod, experimentsDetails, clients) if err != nil { - return targets, err + return targets, stacktrace.Propagate(err, "could not set destination ips") } if targetContainer == "" { diff --git a/chaoslib/litmus/node-cpu-hog/lib/node-cpu-hog.go b/chaoslib/litmus/node-cpu-hog/lib/node-cpu-hog.go index f72fd961d..6aa84a75d 100644 --- a/chaoslib/litmus/node-cpu-hog/lib/node-cpu-hog.go +++ b/chaoslib/litmus/node-cpu-hog/lib/node-cpu-hog.go @@ -3,6 +3,8 @@ package lib import ( "context" "fmt" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/palantir/stacktrace" "strconv" "strings" @@ -14,23 +16,22 @@ import ( "github.com/litmuschaos/litmus-go/pkg/status" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" - "github.com/pkg/errors" "github.com/sirupsen/logrus" apiv1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PrepareNodeCPUHog contains prepration steps before chaos injection +// PrepareNodeCPUHog contains preparation steps before chaos injection func PrepareNodeCPUHog(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { - //setup the tunables if provided in range + //set up the tunables if provided in range setChaosTunables(experimentsDetails) log.InfoWithValues("[Info]: The chaos tunables are:", logrus.Fields{ - "Node CPU Cores": experimentsDetails.NodeCPUcores, - "CPU Load": experimentsDetails.CPULoad, - "Node Affce Perc": experimentsDetails.NodesAffectedPerc, - "Sequence": experimentsDetails.Sequence, + "Node CPU Cores": experimentsDetails.NodeCPUcores, + "CPU Load": experimentsDetails.CPULoad, + "Node Affected Percentage": experimentsDetails.NodesAffectedPerc, + "Sequence": experimentsDetails.Sequence, }) //Waiting for the ramp time before chaos injection @@ -43,7 +44,7 @@ func PrepareNodeCPUHog(experimentsDetails *experimentTypes.ExperimentDetails, cl nodesAffectedPerc, _ := strconv.Atoi(experimentsDetails.NodesAffectedPerc) targetNodeList, err := common.GetNodeList(experimentsDetails.TargetNodes, experimentsDetails.NodeLabel, nodesAffectedPerc, clients) if err != nil { - return err + return stacktrace.Propagate(err, "could not get node list") } log.InfoWithValues("[Info]: Details of Nodes under chaos injection", logrus.Fields{ @@ -53,21 +54,21 @@ func PrepareNodeCPUHog(experimentsDetails *experimentTypes.ExperimentDetails, cl if experimentsDetails.EngineName != "" { if err := common.SetHelperData(chaosDetails, experimentsDetails.SetHelperData, clients); err != nil { - return err + return stacktrace.Propagate(err, "could not set helper data") } } switch strings.ToLower(experimentsDetails.Sequence) { case "serial": if err = injectChaosInSerialMode(experimentsDetails, targetNodeList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": if err = 
injectChaosInParallelMode(experimentsDetails, targetNodeList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.Sequence) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)} } //Waiting for the ramp time after chaos injection @@ -101,20 +102,20 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai // When number of cpu cores for hogging is not defined , it will take it from node capacity if nodeCPUCores == "0" { if err := setCPUCapacity(experimentsDetails, appNode, clients); err != nil { - return err + return stacktrace.Propagate(err, "could not get node cpu capacity") } } log.InfoWithValues("[Info]: Details of Node under chaos injection", logrus.Fields{ "NodeName": appNode, - "NodeCPUcores": experimentsDetails.NodeCPUcores, + "NodeCPUCores": experimentsDetails.NodeCPUcores, }) experimentsDetails.RunID = common.GetRunID() // Creating the helper pod to perform node cpu hog if err := createHelperPod(experimentsDetails, chaosDetails, appNode, clients); err != nil { - return errors.Errorf("unable to create the helper pod, err: %v", err) + return stacktrace.Propagate(err, "could not create helper pod") } appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, experimentsDetails.RunID) @@ -123,7 +124,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai log.Info("[Status]: Checking the status of the helper pod") if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return errors.Errorf("helper pod is not in running state, err: %v", err) + return stacktrace.Propagate(err, "could not check helper status") } common.SetTargets(appNode, "targeted", "node", chaosDetails) @@ -133,13 +134,13 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, experimentsDetails.ExperimentName) if err != nil || podStatus == "Failed" { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return common.HelperFailedError(err) + return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, false) } //Deleting the helper pod log.Info("[Cleanup]: Deleting the helper pod") if err := common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return errors.Errorf("unable to delete the helper pod, err: %v", err) + return stacktrace.Propagate(err, "could not delete helper pod(s)") } } return nil @@ -169,7 +170,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet // When number of cpu cores for hogging is not defined , it will take it from node capacity if nodeCPUCores == "0" { if err := setCPUCapacity(experimentsDetails, appNode, clients); err != nil { - return err + return stacktrace.Propagate(err, "could not get node cpu capacity") } } @@ -180,7 +181,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet // Creating the 
helper pod to perform node cpu hog if err := createHelperPod(experimentsDetails, chaosDetails, appNode, clients); err != nil { - return errors.Errorf("unable to create the helper pod, err: %v", err) + return stacktrace.Propagate(err, "could not create helper pod") } } @@ -190,7 +191,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet log.Info("[Status]: Checking the status of the helper pods") if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return errors.Errorf("helper pod is not in running state, err: %v", err) + return stacktrace.Propagate(err, "could not check helper status") } for _, appNode := range targetNodeList { @@ -202,13 +203,13 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, experimentsDetails.ExperimentName) if err != nil || podStatus == "Failed" { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return common.HelperFailedError(err) + return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, false) } //Deleting the helper pod log.Info("[Cleanup]: Deleting the helper pod") if err = common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return errors.Errorf("unable to delete the helper pod, err: %v", err) + return stacktrace.Propagate(err, "could not delete helper pod(s)") } return nil @@ -218,7 +219,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet func setCPUCapacity(experimentsDetails *experimentTypes.ExperimentDetails, appNode string, clients clients.ClientSets) error { node, err := clients.KubeClient.CoreV1().Nodes().Get(context.Background(), appNode, v1.GetOptions{}) if err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{nodeName: %s}", appNode), Reason: err.Error()} } experimentsDetails.NodeCPUcores = node.Status.Capacity.Cpu().String() return nil @@ -264,11 +265,14 @@ func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, chao } _, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Create(context.Background(), helperPod, v1.CreateOptions{}) - return err + if err != nil { + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("unable to create helper pod: %s", err.Error())} + } + return nil } -//setChaosTunables will setup a random value within a given range of values -//If the value is not provided in range it'll setup the initial provided value. +//setChaosTunables will set up a random value within a given range of values +//If the value is not provided in range it'll set up the initial provided value. 
func setChaosTunables(experimentsDetails *experimentTypes.ExperimentDetails) { experimentsDetails.NodeCPUcores = common.ValidateRange(experimentsDetails.NodeCPUcores) experimentsDetails.CPULoad = common.ValidateRange(experimentsDetails.CPULoad) diff --git a/chaoslib/litmus/node-drain/lib/node-drain.go b/chaoslib/litmus/node-drain/lib/node-drain.go index dcc66143a..a276d3a24 100644 --- a/chaoslib/litmus/node-drain/lib/node-drain.go +++ b/chaoslib/litmus/node-drain/lib/node-drain.go @@ -1,8 +1,10 @@ package lib import ( - "bytes" "context" + "fmt" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/palantir/stacktrace" "os" "os/exec" "os/signal" @@ -20,7 +22,6 @@ import ( "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" "github.com/litmuschaos/litmus-go/pkg/utils/retry" - "github.com/pkg/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -30,7 +31,7 @@ var ( inject, abort chan os.Signal ) -//PrepareNodeDrain contains the prepration steps before chaos injection +//PrepareNodeDrain contains the preparation steps before chaos injection func PrepareNodeDrain(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { // inject channel is used to transmit signal notifications. @@ -53,7 +54,7 @@ func PrepareNodeDrain(experimentsDetails *experimentTypes.ExperimentDetails, cli //Select node for kubelet-service-kill experimentsDetails.TargetNode, err = common.GetNodeName(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.NodeLabel, clients) if err != nil { - return err + return stacktrace.Propagate(err, "could not get node name") } } @@ -75,7 +76,7 @@ func PrepareNodeDrain(experimentsDetails *experimentTypes.ExperimentDetails, cli // Drain the application node if err := drainNode(experimentsDetails, clients, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not drain node") } // Verify the status of AUT after reschedule @@ -83,9 +84,9 @@ func PrepareNodeDrain(experimentsDetails *experimentTypes.ExperimentDetails, cli if err = status.AUTStatusCheck(clients, chaosDetails); err != nil { log.Info("[Revert]: Reverting chaos because application status check failed") if uncordonErr := uncordonNode(experimentsDetails, clients, chaosDetails); uncordonErr != nil { - log.Errorf("Unable to uncordon the node, err: %v", uncordonErr) + return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s,%s]", stacktrace.RootCause(err).Error(), stacktrace.RootCause(uncordonErr).Error())} } - return errors.Errorf("application status check failed, err: %v", err) + return err } // Verify the status of Auxiliary Applications after reschedule @@ -94,9 +95,9 @@ func PrepareNodeDrain(experimentsDetails *experimentTypes.ExperimentDetails, cli if err = status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { log.Info("[Revert]: Reverting chaos because auxiliary application status check failed") if uncordonErr := uncordonNode(experimentsDetails, clients, chaosDetails); uncordonErr != nil { - log.Errorf("Unable to uncordon the node, err: %v", uncordonErr) + return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s,%s]", stacktrace.RootCause(err).Error(), stacktrace.RootCause(uncordonErr).Error())} } - return errors.Errorf("auxiliary Applications 
status check failed, err: %v", err) + return err } } @@ -108,7 +109,7 @@ func PrepareNodeDrain(experimentsDetails *experimentTypes.ExperimentDetails, cli // Uncordon the application node if err := uncordonNode(experimentsDetails, clients, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not uncordon the target node") } //Waiting for the ramp time after chaos injection @@ -119,7 +120,7 @@ func PrepareNodeDrain(experimentsDetails *experimentTypes.ExperimentDetails, cli return nil } -// drainNode drain the application node +// drainNode drains the target node func drainNode(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails) error { select { @@ -130,12 +131,8 @@ func drainNode(experimentsDetails *experimentTypes.ExperimentDetails, clients cl log.Infof("[Inject]: Draining the %v node", experimentsDetails.TargetNode) command := exec.Command("kubectl", "drain", experimentsDetails.TargetNode, "--ignore-daemonsets", "--delete-emptydir-data", "--force", "--timeout", strconv.Itoa(experimentsDetails.ChaosDuration)+"s") - var out, stderr bytes.Buffer - command.Stdout = &out - command.Stderr = &stderr - if err := command.Run(); err != nil { - log.Infof("Error String: %v", stderr.String()) - return errors.Errorf("Unable to drain the %v node, err: %v", experimentsDetails.TargetNode, err) + if err := common.RunCLICommands(command, "", fmt.Sprintf("{node: %s}", experimentsDetails.TargetNode), "failed to drain the target node", cerrors.ErrorTypeChaosInject); err != nil { + return err } common.SetTargets(experimentsDetails.TargetNode, "injected", "node", chaosDetails) @@ -146,10 +143,10 @@ func drainNode(experimentsDetails *experimentTypes.ExperimentDetails, clients cl Try(func(attempt uint) error { nodeSpec, err := clients.KubeClient.CoreV1().Nodes().Get(context.Background(), experimentsDetails.TargetNode, v1.GetOptions{}) if err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{node: %s}", experimentsDetails.TargetNode), Reason: err.Error()} } if !nodeSpec.Spec.Unschedulable { - return errors.Errorf("%v node is not in unschedulable state", experimentsDetails.TargetNode) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{node: %s}", experimentsDetails.TargetNode), Reason: "node is not in unschedulable state"} } return nil }) @@ -171,18 +168,14 @@ func uncordonNode(experimentsDetails *experimentTypes.ExperimentDetails, clients common.SetTargets(targetNode, "noLongerExist", "node", chaosDetails) continue } else { - return errors.Errorf("unable to get the %v node, err: %v", targetNode, err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{node: %s}", targetNode), Reason: err.Error()} } } log.Infof("[Recover]: Uncordon the %v node", targetNode) command := exec.Command("kubectl", "uncordon", targetNode) - var out, stderr bytes.Buffer - command.Stdout = &out - command.Stderr = &stderr - if err := command.Run(); err != nil { - log.Infof("Error String: %v", stderr.String()) - return errors.Errorf("unable to uncordon the %v node, err: %v", targetNode, err) + if err := common.RunCLICommands(command, "", fmt.Sprintf("{node: %s}", targetNode), "failed to uncordon the target node", cerrors.ErrorTypeChaosInject); err != nil { + return err } common.SetTargets(targetNode, "reverted", "node", chaosDetails) } @@ -198,11 +191,11 @@ func uncordonNode(experimentsDetails *experimentTypes.ExperimentDetails,
clients if apierrors.IsNotFound(err) { continue } else { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{node: %s}", targetNode), Reason: err.Error()} } } if nodeSpec.Spec.Unschedulable { - return errors.Errorf("%v node is in unschedulable state", experimentsDetails.TargetNode) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{node: %s}", targetNode), Reason: "target node is in unschedulable state"} } } return nil diff --git a/chaoslib/litmus/node-io-stress/lib/node-io-stress.go b/chaoslib/litmus/node-io-stress/lib/node-io-stress.go index 51f198ed2..6b4d2dc66 100644 --- a/chaoslib/litmus/node-io-stress/lib/node-io-stress.go +++ b/chaoslib/litmus/node-io-stress/lib/node-io-stress.go @@ -3,6 +3,8 @@ package lib import ( "context" "fmt" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/palantir/stacktrace" "strconv" "strings" @@ -14,16 +16,15 @@ import ( "github.com/litmuschaos/litmus-go/pkg/status" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" - "github.com/pkg/errors" "github.com/sirupsen/logrus" apiv1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PrepareNodeIOStress contains prepration steps before chaos injection +// PrepareNodeIOStress contains preparation steps before chaos injection func PrepareNodeIOStress(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { - //setup the tunables if provided in range + //set up the tunables if provided in range setChaosTunables(experimentsDetails) log.InfoWithValues("[Info]: The details of chaos tunables are:", logrus.Fields{ @@ -31,7 +32,7 @@ func PrepareNodeIOStress(experimentsDetails *experimentTypes.ExperimentDetails, "FilesystemUtilizationPercentage": experimentsDetails.FilesystemUtilizationPercentage, "CPU Core": experimentsDetails.CPU, "NumberOfWorkers": experimentsDetails.NumberOfWorkers, - "Node Affce Perc": experimentsDetails.NodesAffectedPerc, + "Node Affected Percentage": experimentsDetails.NodesAffectedPerc, "Sequence": experimentsDetails.Sequence, }) @@ -45,7 +46,7 @@ func PrepareNodeIOStress(experimentsDetails *experimentTypes.ExperimentDetails, nodesAffectedPerc, _ := strconv.Atoi(experimentsDetails.NodesAffectedPerc) targetNodeList, err := common.GetNodeList(experimentsDetails.TargetNodes, experimentsDetails.NodeLabel, nodesAffectedPerc, clients) if err != nil { - return err + return stacktrace.Propagate(err, "could not get node list") } log.InfoWithValues("[Info]: Details of Nodes under chaos injection", logrus.Fields{ "No.
Of Nodes": len(targetNodeList), @@ -54,21 +55,21 @@ func PrepareNodeIOStress(experimentsDetails *experimentTypes.ExperimentDetails, if experimentsDetails.EngineName != "" { if err := common.SetHelperData(chaosDetails, experimentsDetails.SetHelperData, clients); err != nil { - return err + return stacktrace.Propagate(err, "could not set helper data") } } switch strings.ToLower(experimentsDetails.Sequence) { case "serial": if err = injectChaosInSerialMode(experimentsDetails, targetNodeList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": if err = injectChaosInParallelMode(experimentsDetails, targetNodeList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.Sequence) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)} } //Waiting for the ramp time after chaos injection @@ -107,7 +108,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai // Creating the helper pod to perform node io stress if err := createHelperPod(experimentsDetails, chaosDetails, appNode, clients); err != nil { - return errors.Errorf("unable to create the helper pod, err: %v", err) + return stacktrace.Propagate(err, "could not create helper pod") } appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, experimentsDetails.RunID) @@ -116,7 +117,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai log.Info("[Status]: Checking the status of the helper pod") if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return errors.Errorf("helper pod is not in running state, err: %v", err) + return stacktrace.Propagate(err, "could not check helper status") } common.SetTargets(appNode, "injected", "node", chaosDetails) @@ -125,13 +126,13 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai common.SetTargets(appNode, "reverted", "node", chaosDetails) if err != nil || podStatus == "Failed" { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return common.HelperFailedError(err) + return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, false) } //Deleting the helper pod log.Info("[Cleanup]: Deleting the helper pod") if err := common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return errors.Errorf("unable to delete the helper pod, err: %v", err) + return stacktrace.Propagate(err, "could not delete helper pod(s)") } } return nil @@ -165,7 +166,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet // Creating the helper pod to perform node io stress if err := createHelperPod(experimentsDetails, chaosDetails, appNode, clients); err != nil { - return errors.Errorf("unable to create the helper pod, err: %v", err) + return stacktrace.Propagate(err, "could not create helper pod") } } @@ -175,7 +176,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet 
log.Info("[Status]: Checking the status of the helper pod") if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return errors.Errorf("helper pod is not in running state, err: %v", err) + return stacktrace.Propagate(err, "could not check helper status") } for _, appNode := range targetNodeList { @@ -189,13 +190,13 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet } if err != nil || podStatus == "Failed" { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return common.HelperFailedError(err) + return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, false) } //Deleting the helper pod log.Info("[Cleanup]: Deleting the helper pod") if err = common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return errors.Errorf("unable to delete the helper pod, err: %v", err) + return stacktrace.Propagate(err, "could not delete helper pod(s)") } return nil @@ -234,7 +235,10 @@ func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, chao } _, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Create(context.Background(), helperPod, v1.CreateOptions{}) - return err + if err != nil { + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("unable to create helper pod: %s", err.Error())} + } + return nil } // getContainerArguments derives the args for the pumba stress helper pod @@ -276,8 +280,8 @@ func getContainerArguments(experimentsDetails *experimentTypes.ExperimentDetails return stressArgs } -//setChaosTunables will setup a random value within a given range of values -//If the value is not provided in range it'll setup the initial provided value. +//setChaosTunables will set up a random value within a given range of values +//If the value is not provided in range it'll set up the initial provided value. 
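For context on the tunable handling this comment describes, here is a small hypothetical sketch of the range semantics: a value such as "30-70" resolves to a random value inside the range, while a plain value is returned unchanged. pickFromRange is illustrative only and is not the actual common.ValidateRange implementation.

package main

import (
	"fmt"
	"math/rand"
	"strconv"
	"strings"
)

// pickFromRange is a hypothetical stand-in for the behaviour described above:
// "30-70" resolves to a random value inside the range, anything else is
// returned exactly as provided.
func pickFromRange(value string) string {
	parts := strings.Split(value, "-")
	if len(parts) != 2 {
		return value
	}
	lower, err1 := strconv.Atoi(parts[0])
	upper, err2 := strconv.Atoi(parts[1])
	if err1 != nil || err2 != nil || upper < lower {
		return value
	}
	return strconv.Itoa(lower + rand.Intn(upper-lower+1))
}

func main() {
	fmt.Println(pickFromRange("30-70")) // e.g. "42", picked from the range
	fmt.Println(pickFromRange("50"))    // "50", returned unchanged
}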
func setChaosTunables(experimentsDetails *experimentTypes.ExperimentDetails) { experimentsDetails.FilesystemUtilizationBytes = common.ValidateRange(experimentsDetails.FilesystemUtilizationBytes) experimentsDetails.FilesystemUtilizationPercentage = common.ValidateRange(experimentsDetails.FilesystemUtilizationPercentage) diff --git a/chaoslib/litmus/node-memory-hog/lib/node-memory-hog.go b/chaoslib/litmus/node-memory-hog/lib/node-memory-hog.go index f6364682f..57b5bb298 100644 --- a/chaoslib/litmus/node-memory-hog/lib/node-memory-hog.go +++ b/chaoslib/litmus/node-memory-hog/lib/node-memory-hog.go @@ -3,6 +3,8 @@ package lib import ( "context" "fmt" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/palantir/stacktrace" "strconv" "strings" @@ -20,17 +22,17 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PrepareNodeMemoryHog contains prepration steps before chaos injection +// PrepareNodeMemoryHog contains preparation steps before chaos injection func PrepareNodeMemoryHog(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { - //setup the tunables if provided in range + //set up the tunables if provided in range setChaosTunables(experimentsDetails) log.InfoWithValues("[Info]: The details of chaos tunables are:", logrus.Fields{ "MemoryConsumptionMebibytes": experimentsDetails.MemoryConsumptionMebibytes, "MemoryConsumptionPercentage": experimentsDetails.MemoryConsumptionPercentage, "NumberOfWorkers": experimentsDetails.NumberOfWorkers, - "Node Affce Perc": experimentsDetails.NodesAffectedPerc, + "Node Affected Percentage": experimentsDetails.NodesAffectedPerc, "Sequence": experimentsDetails.Sequence, }) @@ -44,8 +46,9 @@ func PrepareNodeMemoryHog(experimentsDetails *experimentTypes.ExperimentDetails, nodesAffectedPerc, _ := strconv.Atoi(experimentsDetails.NodesAffectedPerc) targetNodeList, err := common.GetNodeList(experimentsDetails.TargetNodes, experimentsDetails.NodeLabel, nodesAffectedPerc, clients) if err != nil { - return err + return stacktrace.Propagate(err, "could not get node list") } + log.InfoWithValues("[Info]: Details of Nodes under chaos injection", logrus.Fields{ "No. 
Of Nodes": len(targetNodeList), "Node Names": targetNodeList, @@ -53,21 +56,21 @@ func PrepareNodeMemoryHog(experimentsDetails *experimentTypes.ExperimentDetails, if experimentsDetails.EngineName != "" { if err := common.SetHelperData(chaosDetails, experimentsDetails.SetHelperData, clients); err != nil { - return err + return stacktrace.Propagate(err, "could not set helper data") } } switch strings.ToLower(experimentsDetails.Sequence) { case "serial": if err = injectChaosInSerialMode(experimentsDetails, targetNodeList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": if err = injectChaosInParallelMode(experimentsDetails, targetNodeList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.Sequence) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)} } //Waiting for the ramp time after chaos injection @@ -107,18 +110,18 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai //Getting node memory details memoryCapacity, memoryAllocatable, err := getNodeMemoryDetails(appNode, clients) if err != nil { - return errors.Errorf("unable to get the node memory details, err: %v", err) + return stacktrace.Propagate(err, "could not get node memory details") } //Getting the exact memory value to exhaust - MemoryConsumption, err := calculateMemoryConsumption(experimentsDetails, clients, memoryCapacity, memoryAllocatable) + MemoryConsumption, err := calculateMemoryConsumption(experimentsDetails, memoryCapacity, memoryAllocatable) if err != nil { - return errors.Errorf("memory calculation failed, err: %v", err) + return stacktrace.Propagate(err, "could not calculate memory consumption value") } // Creating the helper pod to perform node memory hog if err = createHelperPod(experimentsDetails, chaosDetails, appNode, clients, MemoryConsumption); err != nil { - return errors.Errorf("unable to create the helper pod, err: %v", err) + return stacktrace.Propagate(err, "could not create helper pod") } appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, experimentsDetails.RunID) @@ -127,7 +130,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai log.Info("[Status]: Checking the status of the helper pod") if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return errors.Errorf("helper pod is not in running state, err: %v", err) + return stacktrace.Propagate(err, "could not check helper status") } common.SetTargets(appNode, "targeted", "node", chaosDetails) @@ -137,7 +140,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, experimentsDetails.ExperimentName) if err != nil { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return common.HelperFailedError(err) + return common.HelperFailedError(err, appLabel, 
chaosDetails.ChaosNamespace, false) } else if podStatus == "Failed" { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) return errors.Errorf("helper pod status is %v", podStatus) @@ -146,7 +149,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai //Deleting the helper pod log.Info("[Cleanup]: Deleting the helper pod") if err := common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return errors.Errorf("unable to delete the helper pod, err: %v", err) + return stacktrace.Propagate(err, "could not delete helper pod(s)") } } return nil @@ -181,18 +184,18 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet //Getting node memory details memoryCapacity, memoryAllocatable, err := getNodeMemoryDetails(appNode, clients) if err != nil { - return errors.Errorf("unable to get the node memory details, err: %v", err) + return stacktrace.Propagate(err, "could not get node memory details") } //Getting the exact memory value to exhaust - MemoryConsumption, err := calculateMemoryConsumption(experimentsDetails, clients, memoryCapacity, memoryAllocatable) + MemoryConsumption, err := calculateMemoryConsumption(experimentsDetails, memoryCapacity, memoryAllocatable) if err != nil { - return errors.Errorf("memory calculation failed, err: %v", err) + return stacktrace.Propagate(err, "could not calculate memory consumption value") } // Creating the helper pod to perform node memory hog if err = createHelperPod(experimentsDetails, chaosDetails, appNode, clients, MemoryConsumption); err != nil { - return errors.Errorf("unable to create the helper pod, err: %v", err) + return stacktrace.Propagate(err, "could not create helper pod") } } @@ -202,7 +205,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet log.Info("[Status]: Checking the status of the helper pod") if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return errors.Errorf("helper pod is not in running state, err: %v", err) + return stacktrace.Propagate(err, "could not check helper status") } for _, appNode := range targetNodeList { @@ -212,18 +215,15 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet // Wait till the completion of helper pod log.Info("[Wait]: Waiting till the completion of the helper pod") podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, experimentsDetails.ExperimentName) - if err != nil { + if err != nil || podStatus == "Failed" { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return common.HelperFailedError(err) - } else if podStatus == "Failed" { - common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return errors.Errorf("helper pod status is %v", podStatus) + return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, false) } //Deleting the helper pod log.Info("[Cleanup]: Deleting the helper pod") if err = common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return errors.Errorf("unable to delete the helper pod, err: %v", err) + return 
stacktrace.Propagate(err, "could not delete helper pod(s)") } return nil @@ -234,22 +234,21 @@ func getNodeMemoryDetails(appNodeName string, clients clients.ClientSets) (int, nodeDetails, err := clients.KubeClient.CoreV1().Nodes().Get(context.Background(), appNodeName, v1.GetOptions{}) if err != nil { - return 0, 0, err + return 0, 0, cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{nodeName: %s}", appNodeName), Reason: err.Error()} } memoryCapacity := int(nodeDetails.Status.Capacity.Memory().Value()) memoryAllocatable := int(nodeDetails.Status.Allocatable.Memory().Value()) if memoryCapacity == 0 || memoryAllocatable == 0 { - return memoryCapacity, memoryAllocatable, errors.Errorf("failed to get memory details of the application node") + return memoryCapacity, memoryAllocatable, cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{nodeName: %s}", appNodeName), Reason: "failed to get memory details of the target node"} } return memoryCapacity, memoryAllocatable, nil - } // calculateMemoryConsumption will calculate the amount of memory to be consumed for a given unit. -func calculateMemoryConsumption(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, memoryCapacity, memoryAllocatable int) (string, error) { +func calculateMemoryConsumption(experimentsDetails *experimentTypes.ExperimentDetails, memoryCapacity, memoryAllocatable int) (string, error) { var totalMemoryConsumption int var MemoryConsumption string @@ -276,10 +275,10 @@ func calculateMemoryConsumption(experimentsDetails *experimentTypes.ExperimentDe //Getting the total memory under chaos memoryConsumptionPercentage, _ := strconv.ParseFloat(experimentsDetails.MemoryConsumptionPercentage, 64) - memoryForChaos := ((memoryConsumptionPercentage / 100) * float64(memoryCapacity)) + memoryForChaos := (memoryConsumptionPercentage / 100) * float64(memoryCapacity) //Get the percentage of memory under chaos wrt allocatable memory - totalMemoryConsumption = int((float64(memoryForChaos) / float64(memoryAllocatable)) * 100) + totalMemoryConsumption = int((memoryForChaos / float64(memoryAllocatable)) * 100) if totalMemoryConsumption > 100 { log.Infof("[Info]: PercentageOfMemoryCapacity To Be Used: %v percent, which is more than 100 percent (%d percent) of Allocatable Memory, so the experiment will only consume upto 100 percent of Allocatable Memory", experimentsDetails.MemoryConsumptionPercentage, totalMemoryConsumption) MemoryConsumption = "100%" @@ -307,7 +306,7 @@ func calculateMemoryConsumption(experimentsDetails *experimentTypes.ExperimentDe } return MemoryConsumption, nil } - return "", errors.Errorf("please specify the memory consumption value either in percentage or mebibytes in a non-decimal format using respective envs") + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: "specify the memory consumption value either in percentage or mebibytes in a non-decimal format using respective envs"} } // createHelperPod derive the attributes for helper pod and create the helper pod @@ -350,11 +349,14 @@ func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, chao } _, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Create(context.Background(), helperPod, v1.CreateOptions{}) - return err + if err != nil { + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("unable to create helper pod: %s", err.Error())} + } + return nil } -//setChaosTunables will setup a random value within a 
given range of values -//If the value is not provided in range it'll setup the initial provided value. +//setChaosTunables will set up a random value within a given range of values +//If the value is not provided in range it'll set up the initial provided value. func setChaosTunables(experimentsDetails *experimentTypes.ExperimentDetails) { experimentsDetails.MemoryConsumptionMebibytes = common.ValidateRange(experimentsDetails.MemoryConsumptionMebibytes) experimentsDetails.MemoryConsumptionPercentage = common.ValidateRange(experimentsDetails.MemoryConsumptionPercentage) diff --git a/chaoslib/litmus/node-restart/lib/node-restart.go b/chaoslib/litmus/node-restart/lib/node-restart.go index 50966ffae..8fba22b6d 100644 --- a/chaoslib/litmus/node-restart/lib/node-restart.go +++ b/chaoslib/litmus/node-restart/lib/node-restart.go @@ -3,6 +3,8 @@ package lib import ( "context" "fmt" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/palantir/stacktrace" "strconv" "strings" @@ -14,7 +16,6 @@ import ( "github.com/litmuschaos/litmus-go/pkg/status" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" - "github.com/pkg/errors" "github.com/sirupsen/logrus" apiv1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -42,7 +43,7 @@ func PrepareNodeRestart(experimentsDetails *experimentTypes.ExperimentDetails, c //Select node for node-restart experimentsDetails.TargetNode, err = common.GetNodeName(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.NodeLabel, clients) if err != nil { - return err + return stacktrace.Propagate(err, "could not get node name") } } @@ -50,7 +51,7 @@ func PrepareNodeRestart(experimentsDetails *experimentTypes.ExperimentDetails, c if experimentsDetails.TargetNodeIP == "" { experimentsDetails.TargetNodeIP, err = getInternalIP(experimentsDetails.TargetNode, clients) if err != nil { - return err + return stacktrace.Propagate(err, "could not get internal ip") } } @@ -60,7 +61,6 @@ func PrepareNodeRestart(experimentsDetails *experimentTypes.ExperimentDetails, c }) experimentsDetails.RunID = common.GetRunID() - appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, experimentsDetails.RunID) //Waiting for the ramp time before chaos injection if experimentsDetails.RampTime != 0 { @@ -80,14 +80,16 @@ func PrepareNodeRestart(experimentsDetails *experimentTypes.ExperimentDetails, c // Creating the helper pod to perform node restart if err = createHelperPod(experimentsDetails, chaosDetails, clients); err != nil { - return errors.Errorf("unable to create the helper pod, err: %v", err) + return stacktrace.Propagate(err, "could not create helper pod") } + appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, experimentsDetails.RunID) + //Checking the status of helper pod log.Info("[Status]: Checking the status of the helper pod") if err = status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { common.DeleteHelperPodBasedOnJobCleanupPolicy(experimentsDetails.ExperimentName+"-helper-"+experimentsDetails.RunID, appLabel, chaosDetails, clients) - return errors.Errorf("helper pod is not in running state, err: %v", err) + return stacktrace.Propagate(err, "could not check helper status") } common.SetTargets(experimentsDetails.TargetNode, "targeted", "node", chaosDetails) @@ -105,13 +107,13 @@ func PrepareNodeRestart(experimentsDetails *experimentTypes.ExperimentDetails, c 
podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, experimentsDetails.ExperimentName) if err != nil || podStatus == "Failed" { common.DeleteHelperPodBasedOnJobCleanupPolicy(experimentsDetails.ExperimentName+"-helper-"+experimentsDetails.RunID, appLabel, chaosDetails, clients) - return common.HelperFailedError(err) + return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, false) } //Deleting the helper pod log.Info("[Cleanup]: Deleting the helper pod") if err = common.DeletePod(experimentsDetails.ExperimentName+"-helper-"+experimentsDetails.RunID, appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return errors.Errorf("unable to delete the helper pod, err: %v", err) + return stacktrace.Propagate(err, "could not delete helper pod") } //Waiting for the ramp time after chaos injection @@ -200,19 +202,22 @@ func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, chao } _, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Create(context.Background(), helperPod, v1.CreateOptions{}) - return err + if err != nil { + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("unable to create helper pod: %s", err.Error())} + } + return nil } // getInternalIP gets the internal ip of the given node func getInternalIP(nodeName string, clients clients.ClientSets) (string, error) { node, err := clients.KubeClient.CoreV1().Nodes().Get(context.Background(), nodeName, v1.GetOptions{}) if err != nil { - return "", err + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{nodeName: %s}", nodeName), Reason: err.Error()} } for _, addr := range node.Status.Addresses { if strings.ToLower(string(addr.Type)) == "internalip" { return addr.Address, nil } } - return "", errors.Errorf("unable to find the internal ip of the %v node", nodeName) + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{nodeName: %s}", nodeName), Reason: "failed to get the internal ip of the target node"} } diff --git a/chaoslib/litmus/node-taint/lib/node-taint.go b/chaoslib/litmus/node-taint/lib/node-taint.go index 893b65e86..1d48078e8 100644 --- a/chaoslib/litmus/node-taint/lib/node-taint.go +++ b/chaoslib/litmus/node-taint/lib/node-taint.go @@ -2,6 +2,9 @@ package lib import ( "context" + "fmt" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/palantir/stacktrace" "os" "os/signal" "strings" @@ -16,7 +19,6 @@ import ( "github.com/litmuschaos/litmus-go/pkg/status" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" - "github.com/pkg/errors" apiv1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -26,7 +28,7 @@ var ( inject, abort chan os.Signal ) -//PrepareNodeTaint contains the prepration steps before chaos injection +//PrepareNodeTaint contains the preparation steps before chaos injection func PrepareNodeTaint(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { // inject channel is used to transmit signal notifications. 
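As a quick reference for the typed errors used above, a minimal sketch of how they are constructed and then propagated. The getNode helper and the node name are hypothetical; the cerrors.Error fields, the error code, and the stacktrace call are the ones used in the hunks above, assuming cerrors.Error satisfies the error interface as it does throughout this patch.

package main

import (
	"fmt"

	"github.com/litmuschaos/litmus-go/pkg/cerrors"
	"github.com/palantir/stacktrace"
)

// getNode is a hypothetical stand-in for a failed Kubernetes lookup.
func getNode(name string) error {
	// ErrorCode classifies the failstep, Target identifies the resource
	// being acted on, and Reason carries the underlying cause.
	return cerrors.Error{
		ErrorCode: cerrors.ErrorTypeChaosInject,
		Target:    fmt.Sprintf("{nodeName: %s}", name),
		Reason:    "node not found",
	}
}

func main() {
	if err := getNode("worker-1"); err != nil {
		// callers attach their own short failstep on top
		fmt.Println(stacktrace.Propagate(err, "could not taint node"))
	}
}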
@@ -49,7 +51,7 @@ func PrepareNodeTaint(experimentsDetails *experimentTypes.ExperimentDetails, cli //Select node for kubelet-service-kill experimentsDetails.TargetNode, err = common.GetNodeName(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.NodeLabel, clients) if err != nil { - return err + return stacktrace.Propagate(err, "could not get node name") } } @@ -71,20 +73,27 @@ func PrepareNodeTaint(experimentsDetails *experimentTypes.ExperimentDetails, cli // taint the application node if err := taintNode(experimentsDetails, clients, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not taint node") } // Verify the status of AUT after reschedule log.Info("[Status]: Verify the status of AUT after reschedule") if err = status.AUTStatusCheck(clients, chaosDetails); err != nil { - return errors.Errorf("application status check failed, err: %v", err) + log.Info("[Revert]: Reverting chaos because application status check failed") + if taintErr := removeTaintFromNode(experimentsDetails, clients, chaosDetails); taintErr != nil { + return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s,%s]", stacktrace.RootCause(err).Error(), stacktrace.RootCause(taintErr).Error())} + } + return err } - // Verify the status of Auxiliary Applications after reschedule if experimentsDetails.AuxiliaryAppInfo != "" { log.Info("[Status]: Verify that the Auxiliary Applications are running") if err = status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { - return errors.Errorf("auxiliary Applications status check failed, err: %v", err) + log.Info("[Revert]: Reverting chaos because auxiliary application status check failed") + if taintErr := removeTaintFromNode(experimentsDetails, clients, chaosDetails); taintErr != nil { + return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s,%s]", stacktrace.RootCause(err).Error(), stacktrace.RootCause(taintErr).Error())} + } + return err } } @@ -96,7 +105,7 @@ func PrepareNodeTaint(experimentsDetails *experimentTypes.ExperimentDetails, cli // remove taint from the application node if err := removeTaintFromNode(experimentsDetails, clients, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not remove taint from node") } //Waiting for the ramp time after chaos injection @@ -117,8 +126,8 @@ func taintNode(experimentsDetails *experimentTypes.ExperimentDetails, clients cl // get the node details node, err := clients.KubeClient.CoreV1().Nodes().Get(context.Background(), experimentsDetails.TargetNode, v1.GetOptions{}) - if err != nil || node == nil { - return errors.Errorf("failed to get %v node, err: %v", experimentsDetails.TargetNode, err) + if err != nil { + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{nodeName: %s}", experimentsDetails.TargetNode), Reason: err.Error()} } // check if the taint already exists @@ -142,9 +151,9 @@ func taintNode(experimentsDetails *experimentTypes.ExperimentDetails, clients cl Effect: apiv1.TaintEffect(taintEffect), }) - updatedNodeWithTaint, err := clients.KubeClient.CoreV1().Nodes().Update(context.Background(), node, v1.UpdateOptions{}) - if err != nil || updatedNodeWithTaint == nil { - return errors.Errorf("failed to update %v node after adding taints, err: %v", experimentsDetails.TargetNode, err) + _, err := clients.KubeClient.CoreV1().Nodes().Update(context.Background(), node, v1.UpdateOptions{}) + if err != nil { + return 
cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{nodeName: %s}", node.Name), Reason: fmt.Sprintf("failed to add taints: %s", err.Error())} } } @@ -164,8 +173,8 @@ func removeTaintFromNode(experimentsDetails *experimentTypes.ExperimentDetails, // get the node details node, err := clients.KubeClient.CoreV1().Nodes().Get(context.Background(), experimentsDetails.TargetNode, v1.GetOptions{}) - if err != nil || node == nil { - return errors.Errorf("failed to get %v node, err: %v", experimentsDetails.TargetNode, err) + if err != nil { + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{nodeName: %s}", experimentsDetails.TargetNode), Reason: err.Error()} } // check if the taint already exists @@ -178,17 +187,17 @@ func removeTaintFromNode(experimentsDetails *experimentTypes.ExperimentDetails, } if tainted { - var Newtaints []apiv1.Taint + var newTaints []apiv1.Taint // remove all the taints with matching key for _, taint := range node.Spec.Taints { if taint.Key != taintKey { - Newtaints = append(Newtaints, taint) + newTaints = append(newTaints, taint) } } - node.Spec.Taints = Newtaints + node.Spec.Taints = newTaints updatedNodeWithTaint, err := clients.KubeClient.CoreV1().Nodes().Update(context.Background(), node, v1.UpdateOptions{}) if err != nil || updatedNodeWithTaint == nil { - return errors.Errorf("failed to update %v node after removing taints, err: %v", experimentsDetails.TargetNode, err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{nodeName: %s}", node.Name), Reason: fmt.Sprintf("failed to remove taints: %s", err.Error())} } } diff --git a/chaoslib/litmus/pod-autoscaler/lib/pod-autoscaler.go b/chaoslib/litmus/pod-autoscaler/lib/pod-autoscaler.go index fd322ed5b..813d514ec 100644 --- a/chaoslib/litmus/pod-autoscaler/lib/pod-autoscaler.go +++ b/chaoslib/litmus/pod-autoscaler/lib/pod-autoscaler.go @@ -2,7 +2,9 @@ package lib import ( "context" - "math" + "fmt" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/palantir/stacktrace" "os" "os/signal" "strings" @@ -12,6 +14,7 @@ import ( clients "github.com/litmuschaos/litmus-go/pkg/clients" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-autoscaler/types" "github.com/litmuschaos/litmus-go/pkg/log" + "github.com/litmuschaos/litmus-go/pkg/math" "github.com/litmuschaos/litmus-go/pkg/probe" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" @@ -20,8 +23,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" retries "k8s.io/client-go/util/retry" - - "github.com/pkg/errors" ) var ( @@ -30,7 +31,7 @@ var ( appsv1StatefulsetClient appsv1.StatefulSetInterface ) -//PreparePodAutoscaler contains the prepration steps and chaos injection steps +//PreparePodAutoscaler contains the preparation steps and chaos injection steps func PreparePodAutoscaler(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { //Waiting for the ramp time before chaos injection @@ -46,9 +47,9 @@ func PreparePodAutoscaler(experimentsDetails *experimentTypes.ExperimentDetails, switch strings.ToLower(experimentsDetails.AppKind) { case "deployment", "deployments": - appsUnderTest, err := getDeploymentDetails(experimentsDetails, clients) + appsUnderTest, err := getDeploymentDetails(experimentsDetails) if err != nil { - 
return errors.Errorf("fail to get the name & initial replica count of the deployment, err: %v", err) + return stacktrace.Propagate(err, "could not get deployment details") } deploymentList := []string{} @@ -64,21 +65,21 @@ func PreparePodAutoscaler(experimentsDetails *experimentTypes.ExperimentDetails, go abortPodAutoScalerChaos(appsUnderTest, experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails) if err = podAutoscalerChaosInDeployment(experimentsDetails, clients, appsUnderTest, resultDetails, eventsDetails, chaosDetails); err != nil { - return errors.Errorf("fail to perform autoscaling, err: %v", err) + return stacktrace.Propagate(err, "could not scale deployment") } if err = autoscalerRecoveryInDeployment(experimentsDetails, clients, appsUnderTest, chaosDetails); err != nil { - return errors.Errorf("fail to rollback the autoscaling, err: %v", err) + return stacktrace.Propagate(err, "could not revert scaling in deployment") } case "statefulset", "statefulsets": - appsUnderTest, err := getStatefulsetDetails(experimentsDetails, clients) + appsUnderTest, err := getStatefulsetDetails(experimentsDetails) if err != nil { - return errors.Errorf("fail to get the name & initial replica count of the statefulset, err: %v", err) + return stacktrace.Propagate(err, "could not get statefulset details") } - stsList := []string{} + var stsList []string for _, sts := range appsUnderTest { stsList = append(stsList, sts.AppName) } @@ -91,15 +92,15 @@ func PreparePodAutoscaler(experimentsDetails *experimentTypes.ExperimentDetails, go abortPodAutoScalerChaos(appsUnderTest, experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails) if err = podAutoscalerChaosInStatefulset(experimentsDetails, clients, appsUnderTest, resultDetails, eventsDetails, chaosDetails); err != nil { - return errors.Errorf("fail to perform autoscaling, err: %v", err) + return stacktrace.Propagate(err, "could not scale statefulset") } if err = autoscalerRecoveryInStatefulset(experimentsDetails, clients, appsUnderTest, chaosDetails); err != nil { - return errors.Errorf("fail to rollback the autoscaling, err: %v", err) + return stacktrace.Propagate(err, "could not revert scaling in statefulset") } default: - return errors.Errorf("application type '%s' is not supported for the chaos", experimentsDetails.AppKind) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{kind: %s}", experimentsDetails.AppKind), Reason: "application type is not supported"} } //Waiting for the ramp time after chaos injection @@ -110,38 +111,38 @@ func PreparePodAutoscaler(experimentsDetails *experimentTypes.ExperimentDetails, return nil } -func getSliceOfTotalApplicationsTargeted(appList []experimentTypes.ApplicationUnderTest, experimentsDetails *experimentTypes.ExperimentDetails) ([]experimentTypes.ApplicationUnderTest, error) { +func getSliceOfTotalApplicationsTargeted(appList []experimentTypes.ApplicationUnderTest, experimentsDetails *experimentTypes.ExperimentDetails) []experimentTypes.ApplicationUnderTest { - slice := int(math.Round(float64(len(appList)*experimentsDetails.AppAffectPercentage) / float64(100))) - if slice < 0 || slice > len(appList) { - return nil, errors.Errorf("slice of applications to target out of range %d/%d", slice, len(appList)) - } - return appList[:slice], nil + newAppListLength := math.Maximum(1, math.Adjustment(math.Minimum(experimentsDetails.AppAffectPercentage, 100), len(appList))) + return appList[:newAppListLength] } //getDeploymentDetails is used to get the name and total 
number of replicas of the deployment -func getDeploymentDetails(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets) ([]experimentTypes.ApplicationUnderTest, error) { +func getDeploymentDetails(experimentsDetails *experimentTypes.ExperimentDetails) ([]experimentTypes.ApplicationUnderTest, error) { deploymentList, err := appsv1DeploymentClient.List(context.Background(), metav1.ListOptions{LabelSelector: experimentsDetails.AppLabel}) - if err != nil || len(deploymentList.Items) == 0 { - return nil, errors.Errorf("fail to get the deployments with matching labels, err: %v", err) + if err != nil { + return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Target: fmt.Sprintf("{kind: deployment, labels: %s}", experimentsDetails.AppLabel), Reason: err.Error()} + } else if len(deploymentList.Items) == 0 { + return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Target: fmt.Sprintf("{kind: deployment, labels: %s}", experimentsDetails.AppLabel), Reason: "no deployment found with matching labels"} } - appsUnderTest := []experimentTypes.ApplicationUnderTest{} + var appsUnderTest []experimentTypes.ApplicationUnderTest for _, app := range deploymentList.Items { log.Infof("[Info]: Found deployment name '%s' with replica count '%d'", app.Name, int(*app.Spec.Replicas)) appsUnderTest = append(appsUnderTest, experimentTypes.ApplicationUnderTest{AppName: app.Name, ReplicaCount: int(*app.Spec.Replicas)}) } - // Applying the APP_AFFECT_PERC variable to determine the total target deployments to scale - return getSliceOfTotalApplicationsTargeted(appsUnderTest, experimentsDetails) - + // Applying the APP_AFFECTED_PERC variable to determine the total target deployments to scale + return getSliceOfTotalApplicationsTargeted(appsUnderTest, experimentsDetails), nil } //getStatefulsetDetails is used to get the name and total number of replicas of the statefulsets -func getStatefulsetDetails(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets) ([]experimentTypes.ApplicationUnderTest, error) { +func getStatefulsetDetails(experimentsDetails *experimentTypes.ExperimentDetails) ([]experimentTypes.ApplicationUnderTest, error) { statefulsetList, err := appsv1StatefulsetClient.List(context.Background(), metav1.ListOptions{LabelSelector: experimentsDetails.AppLabel}) - if err != nil || len(statefulsetList.Items) == 0 { - return nil, errors.Errorf("fail to get the statefulsets with matching labels, err: %v", err) + if err != nil { + return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Target: fmt.Sprintf("{kind: statefulset, labels: %s}", experimentsDetails.AppLabel), Reason: err.Error()} + } else if len(statefulsetList.Items) == 0 { + return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Target: fmt.Sprintf("{kind: statefulset, labels: %s}", experimentsDetails.AppLabel), Reason: "no statefulset found with matching labels"} } appsUnderTest := []experimentTypes.ApplicationUnderTest{} @@ -150,7 +151,7 @@ func getStatefulsetDetails(experimentsDetails *experimentTypes.ExperimentDetails appsUnderTest = append(appsUnderTest, experimentTypes.ApplicationUnderTest{AppName: app.Name, ReplicaCount: int(*app.Spec.Replicas)}) } // Applying the APP_AFFECT_PERC variable to determine the total target deployments to scale - return getSliceOfTotalApplicationsTargeted(appsUnderTest, experimentsDetails) + return getSliceOfTotalApplicationsTargeted(appsUnderTest, experimentsDetails), nil } //podAutoscalerChaosInDeployment 
scales up the replicas of deployment and verify the status @@ -163,29 +164,25 @@ func podAutoscalerChaosInDeployment(experimentsDetails *experimentTypes.Experime // RetryOnConflict uses exponential backoff to avoid exhausting the apiserver appUnderTest, err := appsv1DeploymentClient.Get(context.Background(), app.AppName, metav1.GetOptions{}) if err != nil { - return errors.Errorf("fail to get latest version of application deployment, err: %v", err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{kind: deployment, name: %s, namespace: %s}", app.AppName, experimentsDetails.AppNS), Reason: err.Error()} } // modifying the replica count appUnderTest.Spec.Replicas = int32Ptr(int32(experimentsDetails.Replicas)) log.Infof("Updating deployment '%s' to number of replicas '%d'", appUnderTest.ObjectMeta.Name, experimentsDetails.Replicas) _, err = appsv1DeploymentClient.Update(context.Background(), appUnderTest, metav1.UpdateOptions{}) if err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{kind: deployment, name: %s, namespace: %s}", app.AppName, experimentsDetails.AppNS), Reason: fmt.Sprintf("failed to scale deployment :%s", err.Error())} } common.SetTargets(app.AppName, "injected", "deployment", chaosDetails) } return nil }) if retryErr != nil { - return errors.Errorf("fail to update the replica count of the deployment, err: %v", retryErr) + return retryErr } log.Info("[Info]: The application started scaling") - if err = deploymentStatusCheck(experimentsDetails, clients, appsUnderTest, resultDetails, eventsDetails, chaosDetails); err != nil { - return errors.Errorf("application deployment status check failed, err: %v", err) - } - - return nil + return deploymentStatusCheck(experimentsDetails, clients, appsUnderTest, resultDetails, eventsDetails, chaosDetails) } //podAutoscalerChaosInStatefulset scales up the replicas of statefulset and verify the status @@ -198,28 +195,24 @@ func podAutoscalerChaosInStatefulset(experimentsDetails *experimentTypes.Experim // RetryOnConflict uses exponential backoff to avoid exhausting the apiserver appUnderTest, err := appsv1StatefulsetClient.Get(context.Background(), app.AppName, metav1.GetOptions{}) if err != nil { - return errors.Errorf("fail to get latest version of the target statefulset application , err: %v", err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{kind: statefulset, name: %s, namespace: %s}", app.AppName, experimentsDetails.AppNS), Reason: err.Error()} } // modifying the replica count appUnderTest.Spec.Replicas = int32Ptr(int32(experimentsDetails.Replicas)) _, err = appsv1StatefulsetClient.Update(context.Background(), appUnderTest, metav1.UpdateOptions{}) if err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{kind: statefulset, name: %s, namespace: %s}", app.AppName, experimentsDetails.AppNS), Reason: fmt.Sprintf("failed to scale statefulset :%s", err.Error())} } common.SetTargets(app.AppName, "injected", "statefulset", chaosDetails) } return nil }) if retryErr != nil { - return errors.Errorf("fail to update the replica count of the statefulset application, err: %v", retryErr) + return retryErr } log.Info("[Info]: The application started scaling") - if err = statefulsetStatusCheck(experimentsDetails, clients, appsUnderTest, resultDetails, eventsDetails, chaosDetails); err != nil { - return errors.Errorf("statefulset application status check failed, err: 
%v", err) - } - - return nil + return statefulsetStatusCheck(experimentsDetails, clients, appsUnderTest, resultDetails, eventsDetails, chaosDetails) } // deploymentStatusCheck check the status of deployment and verify the available replicas @@ -227,7 +220,6 @@ func deploymentStatusCheck(experimentsDetails *experimentTypes.ExperimentDetails //ChaosStartTimeStamp contains the start timestamp, when the chaos injection begin ChaosStartTimeStamp := time.Now() - isFailed := false err = retry. Times(uint(experimentsDetails.ChaosDuration / experimentsDetails.Delay)). @@ -236,25 +228,20 @@ func deploymentStatusCheck(experimentsDetails *experimentTypes.ExperimentDetails for _, app := range appsUnderTest { deployment, err := appsv1DeploymentClient.Get(context.Background(), app.AppName, metav1.GetOptions{}) if err != nil { - return errors.Errorf("fail to find the deployment with name %v, err: %v", app.AppName, err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{kind: deployment, namespace: %s, name: %s}", experimentsDetails.AppNS, app.AppName), Reason: err.Error()} } if int(deployment.Status.ReadyReplicas) != experimentsDetails.Replicas { - isFailed = true - return errors.Errorf("application %s is not scaled yet, the desired replica count is: %v and ready replica count is: %v", app.AppName, experimentsDetails.Replicas, deployment.Status.ReadyReplicas) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{kind: deployment, namespace: %s, name: %s}", experimentsDetails.AppNS, app.AppName), Reason: fmt.Sprintf("failed to scale deployment, the desired replica count is: %v and ready replica count is: %v", experimentsDetails.Replicas, deployment.Status.ReadyReplicas)} } } - isFailed = false return nil }) - if isFailed { - if err = autoscalerRecoveryInDeployment(experimentsDetails, clients, appsUnderTest, chaosDetails); err != nil { - return errors.Errorf("fail to perform the autoscaler recovery of the deployment, err: %v", err) - } - return errors.Errorf("fail to scale the deployment to the desired replica count in the given chaos duration") - } if err != nil { - return err + if scaleErr := autoscalerRecoveryInDeployment(experimentsDetails, clients, appsUnderTest, chaosDetails); scaleErr != nil { + return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s,%s]", stacktrace.RootCause(err).Error(), stacktrace.RootCause(scaleErr).Error())} + } + return stacktrace.Propagate(err, "failed to scale replicas") } // run the probes during chaos @@ -263,6 +250,7 @@ func deploymentStatusCheck(experimentsDetails *experimentTypes.ExperimentDetails return err } } + duration := int(time.Since(ChaosStartTimeStamp).Seconds()) if duration < experimentsDetails.ChaosDuration { log.Info("[Wait]: Waiting for completion of chaos duration") @@ -277,7 +265,6 @@ func statefulsetStatusCheck(experimentsDetails *experimentTypes.ExperimentDetail //ChaosStartTimeStamp contains the start timestamp, when the chaos injection begin ChaosStartTimeStamp := time.Now() - isFailed := false err = retry. Times(uint(experimentsDetails.ChaosDuration / experimentsDetails.Delay)). 
@@ -286,25 +273,20 @@ func statefulsetStatusCheck(experimentsDetails *experimentTypes.ExperimentDetail for _, app := range appsUnderTest { statefulset, err := appsv1StatefulsetClient.Get(context.Background(), app.AppName, metav1.GetOptions{}) if err != nil { - return errors.Errorf("fail to find the statefulset with name %v, err: %v", app.AppName, err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{kind: statefulset, namespace: %s, name: %s}", experimentsDetails.AppNS, app.AppName), Reason: err.Error()} } if int(statefulset.Status.ReadyReplicas) != experimentsDetails.Replicas { - isFailed = true - return errors.Errorf("application %s is not scaled yet, the desired replica count is: %v and ready replica count is: %v", app.AppName, experimentsDetails.Replicas, statefulset.Status.ReadyReplicas) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{kind: statefulset, namespace: %s, name: %s}", experimentsDetails.AppNS, app.AppName), Reason: fmt.Sprintf("failed to scale statefulset, the desired replica count is: %v and ready replica count is: %v", experimentsDetails.Replicas, statefulset.Status.ReadyReplicas)} } } - isFailed = false return nil }) - if isFailed { - if err = autoscalerRecoveryInStatefulset(experimentsDetails, clients, appsUnderTest, chaosDetails); err != nil { - return errors.Errorf("fail to perform the autoscaler recovery of the application, err: %v", err) - } - return errors.Errorf("fail to scale the application to the desired replica count in the given chaos duration") - } if err != nil { - return err + if scaleErr := autoscalerRecoveryInStatefulset(experimentsDetails, clients, appsUnderTest, chaosDetails); scaleErr != nil { + return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s,%s]", stacktrace.RootCause(err).Error(), stacktrace.RootCause(scaleErr).Error())} + } + return stacktrace.Propagate(err, "failed to scale replicas") } // run the probes during chaos @@ -333,20 +315,20 @@ func autoscalerRecoveryInDeployment(experimentsDetails *experimentTypes.Experime for _, app := range appsUnderTest { appUnderTest, err := appsv1DeploymentClient.Get(context.Background(), app.AppName, metav1.GetOptions{}) if err != nil { - return errors.Errorf("fail to find the latest version of Application Deployment with name %v, err: %v", app.AppName, err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{kind: deployment, namespace: %s, name: %s}", experimentsDetails.AppNS, app.AppName), Reason: err.Error()} } - appUnderTest.Spec.Replicas = int32Ptr(int32(app.ReplicaCount)) // modify replica count _, err = appsv1DeploymentClient.Update(context.Background(), appUnderTest, metav1.UpdateOptions{}) if err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{kind: deployment, name: %s, namespace: %s}", app.AppName, experimentsDetails.AppNS), Reason: fmt.Sprintf("failed to revert scaling in deployment :%s", err.Error())} } common.SetTargets(app.AppName, "reverted", "deployment", chaosDetails) } return nil }) + if retryErr != nil { - return errors.Errorf("fail to rollback the deployment, err: %v", retryErr) + return retryErr } log.Info("[Info]: Application started rolling back to original replica count") @@ -357,11 +339,11 @@ func autoscalerRecoveryInDeployment(experimentsDetails *experimentTypes.Experime for _, app := range appsUnderTest { applicationDeploy, err := appsv1DeploymentClient.Get(context.Background(), app.AppName, 
metav1.GetOptions{}) if err != nil { - return errors.Errorf("fail to find the deployment with name %v, err: %v", app.AppName, err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{kind: deployment, namespace: %s, name: %s}", experimentsDetails.AppNS, app.AppName), Reason: err.Error()} } if int(applicationDeploy.Status.ReadyReplicas) != app.ReplicaCount { log.Infof("[Info]: Application ready replica count is: %v", applicationDeploy.Status.ReadyReplicas) - return errors.Errorf("fail to rollback to original replica count, err: %v", err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{kind: deployment, namespace: %s, name: %s}", experimentsDetails.AppNS, app.AppName), Reason: fmt.Sprintf("failed to rollback deployment scaling, the desired replica count is: %v and ready replica count is: %v", experimentsDetails.Replicas, applicationDeploy.Status.ReadyReplicas)} } } log.Info("[RollBack]: Application rollback to the initial number of replicas") @@ -379,20 +361,20 @@ func autoscalerRecoveryInStatefulset(experimentsDetails *experimentTypes.Experim // RetryOnConflict uses exponential backoff to avoid exhausting the apiserver appUnderTest, err := appsv1StatefulsetClient.Get(context.Background(), app.AppName, metav1.GetOptions{}) if err != nil { - return errors.Errorf("failed to find the latest version of Statefulset with name %v, err: %v", app.AppName, err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{kind: statefulset, namespace: %s, name: %s}", experimentsDetails.AppNS, app.AppName), Reason: err.Error()} } appUnderTest.Spec.Replicas = int32Ptr(int32(app.ReplicaCount)) // modify replica count _, err = appsv1StatefulsetClient.Update(context.Background(), appUnderTest, metav1.UpdateOptions{}) if err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{kind: statefulset, name: %s, namespace: %s}", app.AppName, experimentsDetails.AppNS), Reason: fmt.Sprintf("failed to revert scaling in statefulset :%s", err.Error())} } common.SetTargets(app.AppName, "reverted", "statefulset", chaosDetails) } return nil }) if retryErr != nil { - return errors.Errorf("fail to rollback the statefulset, err: %v", retryErr) + return retryErr } log.Info("[Info]: Application pod started rolling back") @@ -403,11 +385,11 @@ func autoscalerRecoveryInStatefulset(experimentsDetails *experimentTypes.Experim for _, app := range appsUnderTest { applicationDeploy, err := appsv1StatefulsetClient.Get(context.Background(), app.AppName, metav1.GetOptions{}) if err != nil { - return errors.Errorf("fail to get the statefulset with name %v, err: %v", app.AppName, err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{kind: statefulset, namespace: %s, name: %s}", experimentsDetails.AppNS, app.AppName), Reason: err.Error()} } if int(applicationDeploy.Status.ReadyReplicas) != app.ReplicaCount { log.Infof("Application ready replica count is: %v", applicationDeploy.Status.ReadyReplicas) - return errors.Errorf("fail to roll back to original replica count, err: %v", err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{kind: statefulset, namespace: %s, name: %s}", experimentsDetails.AppNS, app.AppName), Reason: fmt.Sprintf("failed to rollback statefulset scaling, the desired replica count is: %v and ready replica count is: %v", experimentsDetails.Replicas, applicationDeploy.Status.ReadyReplicas)} } } 
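[Editorial note, not part of the patch] Both the scale-up and the recovery paths above follow the same update loop: fetch the latest object, set the replica count, update, and retry on conflicts so stale writes do not hammer the apiserver (the RetryOnConflict call itself sits in unchanged context outside these hunks). A minimal, self-contained sketch of that loop using the standard client-go retry helper follows; the scaleDeployment helper name is hypothetical.

package lib

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// scaleDeployment (hypothetical helper) updates the replica count of a deployment,
// always re-reading the latest object and retrying on update conflicts.
func scaleDeployment(client kubernetes.Interface, namespace, name string, replicas int32) error {
	retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
		// operate on the freshest copy so a conflicting write can be retried cleanly
		dep, err := client.AppsV1().Deployments(namespace).Get(context.Background(), name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		dep.Spec.Replicas = &replicas
		_, err = client.AppsV1().Deployments(namespace).Update(context.Background(), dep, metav1.UpdateOptions{})
		return err
	})
	if retryErr != nil {
		return fmt.Errorf("failed to scale deployment %s/%s: %w", namespace, name, retryErr)
	}
	return nil
}

In the experiment this loop is used twice: once to inject the replica change and once in autoscalerRecoveryInDeployment / autoscalerRecoveryInStatefulset to restore the original counts.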
log.Info("[RollBack]: Application roll back to initial number of replicas") diff --git a/chaoslib/litmus/pod-cpu-hog-exec/lib/pod-cpu-hog-exec.go b/chaoslib/litmus/pod-cpu-hog-exec/lib/pod-cpu-hog-exec.go index b1f2cb800..053fb6a9c 100644 --- a/chaoslib/litmus/pod-cpu-hog-exec/lib/pod-cpu-hog-exec.go +++ b/chaoslib/litmus/pod-cpu-hog-exec/lib/pod-cpu-hog-exec.go @@ -1,6 +1,9 @@ package lib import ( + "fmt" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/palantir/stacktrace" "os" "os/signal" "strings" @@ -16,13 +19,37 @@ import ( "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" litmusexec "github.com/litmuschaos/litmus-go/pkg/utils/exec" - "github.com/pkg/errors" "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" ) var inject chan os.Signal +//PrepareCPUExecStress contains the chaos preparation and injection steps +func PrepareCPUExecStress(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + + // inject channel is used to transmit signal notifications. + inject = make(chan os.Signal, 1) + // Catch and relay certain signal(s) to inject channel. + signal.Notify(inject, os.Interrupt, syscall.SIGTERM) + + //Waiting for the ramp time before chaos injection + if experimentsDetails.RampTime != 0 { + log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime) + common.WaitForDuration(experimentsDetails.RampTime) + } + //Starting the CPU stress experiment + if err := experimentCPU(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + return stacktrace.Propagate(err, "could not stress cpu") + } + //Waiting for the ramp time after chaos injection + if experimentsDetails.RampTime != 0 { + log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime) + common.WaitForDuration(experimentsDetails.RampTime) + } + return nil +} + // stressCPU Uses the REST API to exec into the target container of the target pod // The function will be constantly increasing the CPU utilisation until it reaches the maximum available or allowed number. 
// Using the TOTAL_CHAOS_DURATION we will need to specify for how long this experiment will last @@ -41,11 +68,12 @@ func experimentCPU(experimentsDetails *experimentTypes.ExperimentDetails, client // Get the target pod details for the chaos execution // if the target pod is not defined it will derive the random target pod list using pod affected percentage if experimentsDetails.TargetPods == "" && chaosDetails.AppDetail == nil { - return errors.Errorf("please provide one of the appLabel or TARGET_PODS") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "provide one of the appLabel or TARGET_PODS"} } + targetPodList, err := common.GetPodList(experimentsDetails.TargetPods, experimentsDetails.PodsAffectedPerc, clients, chaosDetails) if err != nil { - return err + return stacktrace.Propagate(err, "could not get target pods") } podNames := []string{} @@ -54,18 +82,18 @@ func experimentCPU(experimentsDetails *experimentTypes.ExperimentDetails, client } log.Infof("Target pods list for chaos, %v", podNames) - experimentsDetails.IsTargetContainerProvided = (experimentsDetails.TargetContainer != "") + experimentsDetails.IsTargetContainerProvided = experimentsDetails.TargetContainer != "" switch strings.ToLower(experimentsDetails.Sequence) { case "serial": if err = injectChaosInSerialMode(experimentsDetails, targetPodList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": if err = injectChaosInParallelMode(experimentsDetails, targetPodList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.Sequence) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)} } return nil @@ -74,7 +102,6 @@ func experimentCPU(experimentsDetails *experimentTypes.ExperimentDetails, client // injectChaosInSerialMode stressed the cpu of all target application serially (one by one) func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList corev1.PodList, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { - var err error // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { @@ -109,10 +136,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai //Get the target container name of the application pod if !experimentsDetails.IsTargetContainerProvided { - experimentsDetails.TargetContainer, err = common.GetTargetContainer(pod.Namespace, pod.Name, clients) - if err != nil { - return errors.Errorf("unable to get the target container name, err: %v", err) - } + experimentsDetails.TargetContainer = pod.Spec.Containers[0].Name } log.InfoWithValues("[Chaos]: The Target application details", logrus.Fields{ @@ -142,18 +166,20 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai log.Warn("Chaos process OOM killed") return nil } - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("podName: %s, namespace: %s, container: %s", pod.Name, pod.Namespace, 
experimentsDetails.TargetContainer), Reason: fmt.Sprintf("failed to stress cpu of target pod: %s", err.Error())} } case <-signChan: log.Info("[Chaos]: Revert Started") - err := killStressCPUSerial(experimentsDetails, pod.Name, pod.Namespace, clients, chaosDetails) - if err != nil { + if err := killStressCPUSerial(experimentsDetails, pod.Name, pod.Namespace, clients, chaosDetails); err != nil { log.Errorf("Error in Kill stress after abortion, err: %v", err) } // updating the chaosresult after stopped - failStep := "Chaos injection stopped!" - types.SetResultAfterCompletion(resultDetails, "Stopped", "Stopped", failStep) - result.ChaosResult(chaosDetails, clients, resultDetails, "EOT") + err := cerrors.Error{ErrorCode: cerrors.ErrorTypeExperimentAborted, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", pod.Name, pod.Namespace, experimentsDetails.TargetContainer), Reason: "experiment is aborted"} + failStep, errCode := cerrors.GetRootCauseAndErrorCode(err, string(chaosDetails.Phase)) + types.SetResultAfterCompletion(resultDetails, "Stopped", "Stopped", failStep, errCode) + if err := result.ChaosResult(chaosDetails, clients, resultDetails, "EOT"); err != nil { + log.Errorf("failed to update chaos result %s", err.Error()) + } log.Info("[Chaos]: Revert Completed") os.Exit(1) case <-endTime: @@ -163,7 +189,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai } } if err := killStressCPUSerial(experimentsDetails, pod.Name, pod.Namespace, clients, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not revert cpu stress") } } } @@ -174,7 +200,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList corev1.PodList, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { // creating err channel to receive the error from the go routine stressErr := make(chan error) - var err error + // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { @@ -205,10 +231,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet } //Get the target container name of the application pod if !experimentsDetails.IsTargetContainerProvided { - experimentsDetails.TargetContainer, err = common.GetTargetContainer(pod.Namespace, pod.Name, clients) - if err != nil { - return errors.Errorf("unable to get the target container name, err: %v", err) - } + experimentsDetails.TargetContainer = pod.Spec.Containers[0].Name } log.InfoWithValues("[Chaos]: The Target application details", logrus.Fields{ @@ -238,7 +261,7 @@ loop: log.Warn("Chaos process OOM killed") return nil } - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Reason: fmt.Sprintf("failed to stress cpu of target pod: %s", err.Error())} } case <-signChan: log.Info("[Chaos]: Revert Started") @@ -246,9 +269,12 @@ loop: log.Errorf("Error in Kill stress after abortion, err: %v", err) } // updating the chaosresult after stopped - failStep := "Chaos injection stopped!" 
- types.SetResultAfterCompletion(resultDetails, "Stopped", "Stopped", failStep) - result.ChaosResult(chaosDetails, clients, resultDetails, "EOT") + err := cerrors.Error{ErrorCode: cerrors.ErrorTypeExperimentAborted, Reason: "experiment is aborted"} + failStep, errCode := cerrors.GetRootCauseAndErrorCode(err, string(chaosDetails.Phase)) + types.SetResultAfterCompletion(resultDetails, "Stopped", "Stopped", failStep, errCode) + if err := result.ChaosResult(chaosDetails, clients, resultDetails, "EOT"); err != nil { + log.Errorf("failed to update chaos result %s", err.Error()) + } log.Info("[Chaos]: Revert Completed") os.Exit(1) case <-endTime: @@ -260,43 +286,18 @@ loop: return killStressCPUParallel(experimentsDetails, targetPodList, clients, chaosDetails) } -//PrepareCPUExecStress contains the chaos prepration and injection steps -func PrepareCPUExecStress(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { - - // inject channel is used to transmit signal notifications. - inject = make(chan os.Signal, 1) - // Catch and relay certain signal(s) to inject channel. - signal.Notify(inject, os.Interrupt, syscall.SIGTERM) - - //Waiting for the ramp time before chaos injection - if experimentsDetails.RampTime != 0 { - log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime) - common.WaitForDuration(experimentsDetails.RampTime) - } - //Starting the CPU stress experiment - if err := experimentCPU(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err - } - //Waiting for the ramp time after chaos injection - if experimentsDetails.RampTime != 0 { - log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime) - common.WaitForDuration(experimentsDetails.RampTime) - } - return nil -} - // killStressCPUSerial function to kill a stress process running inside target container // Triggered by either timeout of chaos duration or termination of the experiment func killStressCPUSerial(experimentsDetails *experimentTypes.ExperimentDetails, podName, ns string, clients clients.ClientSets, chaosDetails *types.ChaosDetails) error { - // It will contains all the pod & container details required for exec command + // It will contain all the pod & container details required for exec command execCommandDetails := litmusexec.PodDetails{} command := []string{"/bin/sh", "-c", experimentsDetails.ChaosKillCmd} litmusexec.SetExecCommandAttributes(&execCommandDetails, podName, experimentsDetails.TargetContainer, ns) - _, err := litmusexec.Exec(&execCommandDetails, clients, command) + out, err := litmusexec.Exec(&execCommandDetails, clients, command) if err != nil { - return errors.Errorf("Unable to kill the stress process in %v pod, err: %v", podName, err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{podName: %s, namespace: %s}", podName, ns), Reason: fmt.Sprintf("failed to revert chaos: %s", out)} } common.SetTargets(podName, "reverted", "pod", chaosDetails) return nil @@ -305,12 +306,14 @@ func killStressCPUSerial(experimentsDetails *experimentTypes.ExperimentDetails, // killStressCPUParallel function to kill all the stress process running inside target container // Triggered by either timeout of chaos duration or termination of the experiment func killStressCPUParallel(experimentsDetails 
*experimentTypes.ExperimentDetails, targetPodList corev1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails) error { - + var errList []string for _, pod := range targetPodList.Items { - if err := killStressCPUSerial(experimentsDetails, pod.Name, pod.Namespace, clients, chaosDetails); err != nil { - return err + errList = append(errList, err.Error()) } } + if len(errList) != 0 { + return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s]", strings.Join(errList, ","))} + } return nil } diff --git a/chaoslib/litmus/pod-delete/lib/pod-delete.go b/chaoslib/litmus/pod-delete/lib/pod-delete.go index ecd045815..a513beb31 100644 --- a/chaoslib/litmus/pod-delete/lib/pod-delete.go +++ b/chaoslib/litmus/pod-delete/lib/pod-delete.go @@ -2,11 +2,15 @@ package lib import ( "context" - "github.com/litmuschaos/litmus-go/pkg/workloads" + "fmt" "strconv" "strings" "time" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/litmuschaos/litmus-go/pkg/workloads" + "github.com/palantir/stacktrace" + clients "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-delete/types" @@ -15,12 +19,11 @@ import ( "github.com/litmuschaos/litmus-go/pkg/status" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" - "github.com/pkg/errors" "github.com/sirupsen/logrus" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -//PreparePodDelete contains the prepration steps before chaos injection +// PreparePodDelete contains the prepration steps before chaos injection func PreparePodDelete(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { //Waiting for the ramp time before chaos injection @@ -29,7 +32,7 @@ func PreparePodDelete(experimentsDetails *experimentTypes.ExperimentDetails, cli common.WaitForDuration(experimentsDetails.RampTime) } - //setup the tunables if provided in range + //set up the tunables if provided in range SetChaosTunables(experimentsDetails) log.InfoWithValues("[Info]: The chaos tunables are:", logrus.Fields{ @@ -40,14 +43,14 @@ func PreparePodDelete(experimentsDetails *experimentTypes.ExperimentDetails, cli switch strings.ToLower(experimentsDetails.Sequence) { case "serial": if err := injectChaosInSerialMode(experimentsDetails, clients, chaosDetails, eventsDetails, resultDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": if err := injectChaosInParallelMode(experimentsDetails, clients, chaosDetails, eventsDetails, resultDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.Sequence) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)} } //Waiting for the ramp time after chaos injection @@ -77,19 +80,19 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai // Get the target pod details for the chaos execution // if the target pod is not defined it will derive the random target pod list using pod affected percentage if experimentsDetails.TargetPods == "" && chaosDetails.AppDetail == nil { - return errors.Errorf("please provide one of the appLabel or TARGET_PODS") + 
return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "provide one of the appLabel or TARGET_PODS"} } targetPodList, err := common.GetTargetPods(experimentsDetails.NodeLabel, experimentsDetails.TargetPods, experimentsDetails.PodsAffectedPerc, clients, chaosDetails) if err != nil { - return err + return stacktrace.Propagate(err, "could not get target pods") } // deriving the parent name of the target resources for _, pod := range targetPodList.Items { kind, parentName, err := workloads.GetPodOwnerTypeAndName(&pod, clients.DynamicClient) if err != nil { - return err + return stacktrace.Propagate(err, "could not get pod owner name and kind") } common.SetParentName(parentName, kind, pod.Namespace, chaosDetails) } @@ -115,13 +118,13 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai err = clients.KubeClient.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, v1.DeleteOptions{}) } if err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: fmt.Sprintf("failed to delete the target pod: %s", err.Error())} } switch chaosDetails.Randomness { case true: if err := common.RandomInterval(experimentsDetails.ChaosInterval); err != nil { - return err + return stacktrace.Propagate(err, "could not get random chaos interval") } default: //Waiting for the chaos interval after chaos injection @@ -141,7 +144,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai Namespace: parent.Namespace, } if err = status.CheckUnTerminatedPodStatusesByWorkloadName(target, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { - return err + return stacktrace.Propagate(err, "could not check pod statuses by workload names") } } @@ -174,18 +177,18 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet // Get the target pod details for the chaos execution // if the target pod is not defined it will derive the random target pod list using pod affected percentage if experimentsDetails.TargetPods == "" && chaosDetails.AppDetail == nil { - return errors.Errorf("please provide one of the appLabel or TARGET_PODS") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "please provide one of the appLabel or TARGET_PODS"} } targetPodList, err := common.GetTargetPods(experimentsDetails.NodeLabel, experimentsDetails.TargetPods, experimentsDetails.PodsAffectedPerc, clients, chaosDetails) if err != nil { - return err + return stacktrace.Propagate(err, "could not get target pods") } // deriving the parent name of the target resources for _, pod := range targetPodList.Items { kind, parentName, err := workloads.GetPodOwnerTypeAndName(&pod, clients.DynamicClient) if err != nil { - return err + return stacktrace.Propagate(err, "could not get pod owner name and kind") } common.SetParentName(parentName, kind, pod.Namespace, chaosDetails) } @@ -211,14 +214,14 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet err = clients.KubeClient.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, v1.DeleteOptions{}) } if err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: fmt.Sprintf("failed to delete the target pod: %s", err.Error())} } } switch chaosDetails.Randomness { case true: if err := 
common.RandomInterval(experimentsDetails.ChaosInterval); err != nil { - return err + return stacktrace.Propagate(err, "could not get random chaos interval") } default: //Waiting for the chaos interval after chaos injection @@ -238,7 +241,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet Namespace: parent.Namespace, } if err = status.CheckUnTerminatedPodStatusesByWorkloadName(target, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { - return err + return stacktrace.Propagate(err, "could not check pod statuses by workload names") } } duration = int(time.Since(ChaosStartTimeStamp).Seconds()) @@ -249,8 +252,8 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet return nil } -//SetChaosTunables will setup a random value within a given range of values -//If the value is not provided in range it'll setup the initial provided value. +// SetChaosTunables will setup a random value within a given range of values +// If the value is not provided in range it'll setup the initial provided value. func SetChaosTunables(experimentsDetails *experimentTypes.ExperimentDetails) { experimentsDetails.PodsAffectedPerc = common.ValidateRange(experimentsDetails.PodsAffectedPerc) experimentsDetails.Sequence = common.GetRandomSequence(experimentsDetails.Sequence) diff --git a/chaoslib/litmus/pod-dns-chaos/helper/dnschaos.go b/chaoslib/litmus/pod-dns-chaos/helper/dnschaos.go index b2e08a581..c80a5a1fc 100644 --- a/chaoslib/litmus/pod-dns-chaos/helper/dnschaos.go +++ b/chaoslib/litmus/pod-dns-chaos/helper/dnschaos.go @@ -3,8 +3,8 @@ package helper import ( "bytes" "fmt" - "github.com/kyokomi/emoji" - "github.com/pkg/errors" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/palantir/stacktrace" "os" "os/exec" "os/signal" @@ -65,6 +65,10 @@ func Helper(clients clients.ClientSets) { result.SetResultUID(&resultDetails, clients, &chaosDetails) if err := preparePodDNSChaos(&experimentsDetails, clients, &eventsDetails, &chaosDetails, &resultDetails); err != nil { + // update failstep inside chaosresult + if resultErr := result.UpdateFailedStepFromHelper(&resultDetails, &chaosDetails, clients, err); resultErr != nil { + log.Fatalf("helper pod failed, err: %v, resultErr: %v", err, resultErr) + } log.Fatalf("helper pod failed, err: %v", err) } @@ -73,9 +77,9 @@ func Helper(clients clients.ClientSets) { //preparePodDNSChaos contains the preparation steps before chaos injection func preparePodDNSChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails) error { - targetList, err := common.ParseTargets() + targetList, err := common.ParseTargets(chaosDetails.ChaosPodName) if err != nil { - return err + return stacktrace.Propagate(err, "could not parse targets") } var targets []targetDetails @@ -85,19 +89,19 @@ func preparePodDNSChaos(experimentsDetails *experimentTypes.ExperimentDetails, c Name: t.Name, Namespace: t.Namespace, TargetContainer: t.TargetContainer, + Source: chaosDetails.ChaosPodName, } - td.ContainerId, err = common.GetContainerID(td.Namespace, td.Name, td.TargetContainer, clients) + td.ContainerId, err = common.GetContainerID(td.Namespace, td.Name, td.TargetContainer, clients, td.Source) if err != nil { - return err + return stacktrace.Propagate(err, "could not get container id") } // extract out the pid of the target container - td.Pid, err = 
common.GetPID(experimentsDetails.ContainerRuntime, td.ContainerId, experimentsDetails.SocketPath) + td.Pid, err = common.GetPID(experimentsDetails.ContainerRuntime, td.ContainerId, experimentsDetails.SocketPath, td.Source) if err != nil { - return err + return stacktrace.Propagate(err, "could not get container pid") } - targets = append(targets, td) } @@ -114,16 +118,16 @@ func preparePodDNSChaos(experimentsDetails *experimentTypes.ExperimentDetails, c done := make(chan error, 1) for index, t := range targets { - targets[index].Cmd, err = injectChaos(experimentsDetails, t.Pid) + targets[index].Cmd, err = injectChaos(experimentsDetails, t) if err != nil { - return err + return stacktrace.Propagate(err, "could not inject chaos") } log.Infof("successfully injected chaos on target: {name: %s, namespace: %v, container: %v}", t.Name, t.Namespace, t.TargetContainer) if err = result.AnnotateChaosResult(resultDetails.Name, chaosDetails.ChaosNamespace, "injected", "pod", t.Name); err != nil { if revertErr := terminateProcess(t); revertErr != nil { - return fmt.Errorf("failed to revert and annotate the result, err: %v", fmt.Sprintf("%s, %s", err.Error(), revertErr.Error())) + return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s,%s]", stacktrace.RootCause(err).Error(), stacktrace.RootCause(revertErr).Error())} } - return err + return stacktrace.Propagate(err, "could not annotate chaosresult") } } @@ -170,7 +174,7 @@ func preparePodDNSChaos(experimentsDetails *experimentTypes.ExperimentDetails, c } } if len(errList) != 0 { - return fmt.Errorf("err: %v", strings.Join(errList, ", ")) + return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s]", strings.Join(errList, ","))} } case doneErr := <-done: select { @@ -190,7 +194,7 @@ func preparePodDNSChaos(experimentsDetails *experimentTypes.ExperimentDetails, c } } if len(errList) != 0 { - return fmt.Errorf("err: %v", strings.Join(errList, ", ")) + return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s]", strings.Join(errList, ","))} } return doneErr } @@ -199,18 +203,18 @@ func preparePodDNSChaos(experimentsDetails *experimentTypes.ExperimentDetails, c return nil } -func injectChaos(experimentsDetails *experimentTypes.ExperimentDetails, pid int) (*exec.Cmd, error) { +func injectChaos(experimentsDetails *experimentTypes.ExperimentDetails, t targetDetails) (*exec.Cmd, error) { // prepare dns interceptor - commandTemplate := fmt.Sprintf("sudo TARGET_PID=%d CHAOS_TYPE=%s SPOOF_MAP='%s' TARGET_HOSTNAMES='%s' CHAOS_DURATION=%d MATCH_SCHEME=%s nsutil -p -n -t %d -- dns_interceptor", pid, experimentsDetails.ChaosType, experimentsDetails.SpoofMap, experimentsDetails.TargetHostNames, experimentsDetails.ChaosDuration, experimentsDetails.MatchScheme, pid) + var out bytes.Buffer + commandTemplate := fmt.Sprintf("sudo TARGET_PID=%d CHAOS_TYPE=%s SPOOF_MAP='%s' TARGET_HOSTNAMES='%s' CHAOS_DURATION=%d MATCH_SCHEME=%s nsutil -p -n -t %d -- dns_interceptor", t.Pid, experimentsDetails.ChaosType, experimentsDetails.SpoofMap, experimentsDetails.TargetHostNames, experimentsDetails.ChaosDuration, experimentsDetails.MatchScheme, t.Pid) cmd := exec.Command("/bin/bash", "-c", commandTemplate) log.Info(cmd.String()) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr + cmd.Stdout = &out + cmd.Stderr = &out - err = cmd.Start() - if err != nil { - return nil, errors.Errorf("fail to start the dns process, err: %v", err) + if err = cmd.Start(); err != nil { + return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Source: experimentsDetails.ChaosPodName, Target: 
fmt.Sprintf("{podName: %s, namespace: %s}", t.Name, t.Namespace), Reason: fmt.Sprintf("faild to inject chaos: %s", out.String())} } return cmd, nil } @@ -219,13 +223,14 @@ func terminateProcess(t targetDetails) error { // kill command killTemplate := fmt.Sprintf("sudo kill %d", t.Cmd.Process.Pid) kill := exec.Command("/bin/bash", "-c", killTemplate) - var stderr bytes.Buffer - kill.Stderr = &stderr + var out bytes.Buffer + kill.Stderr = &out + kill.Stdout = &out if err = kill.Run(); err != nil { - if strings.Contains(strings.ToLower(stderr.String()), ProcessAlreadyKilled) { + if strings.Contains(strings.ToLower(out.String()), ProcessAlreadyKilled) { return nil } - log.Errorf("unable to kill dns interceptor process %v, err :%v", emoji.Sprint(":cry:"), err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s}", t.Name, t.Namespace), Reason: fmt.Sprintf("failed to revert chaos %s", out.String())} } else { log.Errorf("dns interceptor process stopped") log.Infof("successfully injected chaos on target: {name: %s, namespace: %v, container: %v}", t.Name, t.Namespace, t.TargetContainer) @@ -284,4 +289,5 @@ type targetDetails struct { Pid int CommandPid int Cmd *exec.Cmd + Source string } diff --git a/chaoslib/litmus/pod-dns-chaos/lib/pod-dns-chaos.go b/chaoslib/litmus/pod-dns-chaos/lib/pod-dns-chaos.go index 9e2590e0a..315533cf2 100644 --- a/chaoslib/litmus/pod-dns-chaos/lib/pod-dns-chaos.go +++ b/chaoslib/litmus/pod-dns-chaos/lib/pod-dns-chaos.go @@ -3,6 +3,8 @@ package lib import ( "context" "fmt" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/palantir/stacktrace" "strconv" "strings" @@ -25,11 +27,11 @@ func PrepareAndInjectChaos(experimentsDetails *experimentTypes.ExperimentDetails // Get the target pod details for the chaos execution // if the target pod is not defined it will derive the random target pod list using pod affected percentage if experimentsDetails.TargetPods == "" && chaosDetails.AppDetail == nil { - return errors.Errorf("please provide one of the appLabel or TARGET_PODS") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "provide one of the appLabel or TARGET_PODS"} } targetPodList, err := common.GetPodList(experimentsDetails.TargetPods, experimentsDetails.PodsAffectedPerc, clients, chaosDetails) if err != nil { - return err + return stacktrace.Propagate(err, "could not get target pods") } podNames := []string{} @@ -48,28 +50,28 @@ func PrepareAndInjectChaos(experimentsDetails *experimentTypes.ExperimentDetails if experimentsDetails.ChaosServiceAccount == "" { experimentsDetails.ChaosServiceAccount, err = common.GetServiceAccount(experimentsDetails.ChaosNamespace, experimentsDetails.ChaosPodName, clients) if err != nil { - return errors.Errorf("unable to get the serviceAccountName, err: %v", err) + return stacktrace.Propagate(err, "could not experiment service account") } } if experimentsDetails.EngineName != "" { if err := common.SetHelperData(chaosDetails, experimentsDetails.SetHelperData, clients); err != nil { - return err + return stacktrace.Propagate(err, "could not set helper data") } } - experimentsDetails.IsTargetContainerProvided = (experimentsDetails.TargetContainer != "") + experimentsDetails.IsTargetContainerProvided = experimentsDetails.TargetContainer != "" switch strings.ToLower(experimentsDetails.Sequence) { case "serial": if err = injectChaosInSerialMode(experimentsDetails, targetPodList, clients, chaosDetails, resultDetails, eventsDetails); 
err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": if err = injectChaosInParallelMode(experimentsDetails, targetPodList, clients, chaosDetails, resultDetails, eventsDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.Sequence) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)} } return nil @@ -78,7 +80,6 @@ func PrepareAndInjectChaos(experimentsDetails *experimentTypes.ExperimentDetails // injectChaosInSerialMode inject the DNS Chaos in all target application serially (one by one) func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { - var err error // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { @@ -91,10 +92,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai //Get the target container name of the application pod if !experimentsDetails.IsTargetContainerProvided { - experimentsDetails.TargetContainer, err = common.GetTargetContainer(pod.Namespace, pod.Name, clients) - if err != nil { - return errors.Errorf("unable to get the target container name, err: %v", err) - } + experimentsDetails.TargetContainer = pod.Spec.Containers[0].Name } log.InfoWithValues("[Info]: Details of application under chaos injection", logrus.Fields{ @@ -104,7 +102,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai }) runID := common.GetRunID() if err := createHelperPod(experimentsDetails, clients, chaosDetails, fmt.Sprintf("%s:%s:%s", pod.Name, pod.Namespace, experimentsDetails.TargetContainer), pod.Spec.NodeName, runID); err != nil { - return errors.Errorf("unable to create the helper pod, err: %v", err) + return stacktrace.Propagate(err, "could not create helper pod") } appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID) @@ -122,13 +120,13 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, experimentsDetails.ExperimentName) if err != nil || podStatus == "Failed" { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return common.HelperFailedError(err) + return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, true) } //Deleting all the helper pod for pod-dns chaos log.Info("[Cleanup]: Deleting the helper pod") if err = common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return errors.Errorf("Unable to delete the helper pods, err: %v", err) + return stacktrace.Propagate(err, "could not delete helper pod(s)") } } @@ -156,7 +154,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet } if err := createHelperPod(experimentsDetails, clients, chaosDetails, strings.Join(targetsPerNode, ";"), node, runID); err != nil { - return errors.Errorf("unable 
to create the helper pod, err: %v", err) + return stacktrace.Propagate(err, "could not create helper pod") } } @@ -166,7 +164,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet log.Info("[Status]: Checking the status of the helper pods") if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return errors.Errorf("helper pods are not in running state, err: %v", err) + return stacktrace.Propagate(err, "could not check helper status") } // Wait till the completion of the helper pod @@ -175,13 +173,13 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, experimentsDetails.ExperimentName) if err != nil || podStatus == "Failed" { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return common.HelperFailedError(err) + return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, true) } //Deleting all the helper pod for pod-dns chaos log.Info("[Cleanup]: Deleting all the helper pod") if err = common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return errors.Errorf("Unable to delete the helper pods, err: %v", err) + return stacktrace.Propagate(err, "could not delete helper pod(s)") } return nil @@ -247,8 +245,10 @@ func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clie } _, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Create(context.Background(), helperPod, v1.CreateOptions{}) - return err - + if err != nil { + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("unable to create helper pod: %s", err.Error())} + } + return nil } // getPodEnv derive all the env required for the helper pod diff --git a/chaoslib/litmus/pod-fio-stress/lib/pod-fio-stress.go b/chaoslib/litmus/pod-fio-stress/lib/pod-fio-stress.go index 564c24132..b9955582d 100644 --- a/chaoslib/litmus/pod-fio-stress/lib/pod-fio-stress.go +++ b/chaoslib/litmus/pod-fio-stress/lib/pod-fio-stress.go @@ -2,6 +2,9 @@ package lib import ( "fmt" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/litmuschaos/litmus-go/pkg/result" + "github.com/palantir/stacktrace" "os" "os/signal" "strings" @@ -16,11 +19,30 @@ import ( "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" litmusexec "github.com/litmuschaos/litmus-go/pkg/utils/exec" - "github.com/pkg/errors" "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" ) +//PrepareChaos contains the chaos preparation and injection steps +func PrepareChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + + //Waiting for the ramp time before chaos injection + if experimentsDetails.RampTime != 0 { + log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime) + common.WaitForDuration(experimentsDetails.RampTime) + } + //Starting the Fio stress experiment + if err := experimentExecution(experimentsDetails, clients, resultDetails, eventsDetails, 
chaosDetails); err != nil { + return stacktrace.Propagate(err, "could not inject chaos") + } + //Waiting for the ramp time after chaos injection + if experimentsDetails.RampTime != 0 { + log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime) + common.WaitForDuration(experimentsDetails.RampTime) + } + return nil +} + // stressStorage uses the REST API to exec into the target container of the target pod // The function will be constantly increasing the storage utilisation until it reaches the maximum available or allowed number. // Using the TOTAL_CHAOS_DURATION we will need to specify for how long this experiment will last @@ -49,11 +71,12 @@ func experimentExecution(experimentsDetails *experimentTypes.ExperimentDetails, // Get the target pod details for the chaos execution // if the target pod is not defined it will derive the random target pod list using pod affected percentage if experimentsDetails.TargetPods == "" && chaosDetails.AppDetail == nil { - return errors.Errorf("please provide either of the appLabel or TARGET_PODS") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "provide one of the appLabel or TARGET_PODS"} } + targetPodList, err := common.GetPodList(experimentsDetails.TargetPods, experimentsDetails.PodsAffectedPerc, clients, chaosDetails) if err != nil { - return err + return stacktrace.Propagate(err, "could not get target pods") } podNames := []string{} @@ -62,18 +85,18 @@ func experimentExecution(experimentsDetails *experimentTypes.ExperimentDetails, } log.Infof("Target pods list for chaos, %v", podNames) - experimentsDetails.IsTargetContainerProvided = (experimentsDetails.TargetContainer != "") + experimentsDetails.IsTargetContainerProvided = experimentsDetails.TargetContainer != "" switch strings.ToLower(experimentsDetails.Sequence) { case "serial": if err = injectChaosInSerialMode(experimentsDetails, targetPodList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": if err = injectChaosInParallelMode(experimentsDetails, targetPodList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.Sequence) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)} } return nil @@ -83,7 +106,6 @@ func experimentExecution(experimentsDetails *experimentTypes.ExperimentDetails, func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList corev1.PodList, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { // creating err channel to receive the error from the go routine stressErr := make(chan error) - var err error // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { @@ -103,10 +125,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai } //Get the target container name of the application pod if !experimentsDetails.IsTargetContainerProvided { - experimentsDetails.TargetContainer, err = common.GetTargetContainer(pod.Namespace, pod.Name, clients) - 
if err != nil { - return errors.Errorf("unable to get the target container name, err: %v", err) - } + experimentsDetails.TargetContainer = pod.Spec.Containers[0].Name } log.InfoWithValues("[Chaos]: The Target application details", logrus.Fields{ @@ -130,19 +149,25 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai case err := <-stressErr: // skipping the execution, if received any error other than 137, while executing stress command and marked result as fail // it will ignore the error code 137(oom kill), it will skip further execution and marked the result as pass - // oom kill occurs if stor to be stressed exceed than the resource limit for the target container + // oom kill occurs if resource to be stressed exceed than the resource limit for the target container if err != nil { if strings.Contains(err.Error(), "137") { log.Warn("Chaos process OOM killed") return nil } - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("podName: %s, namespace: %s, container: %s", pod.Name, pod.Namespace, experimentsDetails.TargetContainer), Reason: fmt.Sprintf("failed to stress cpu of target pod: %s", err.Error())} } case <-signChan: log.Info("[Chaos]: Revert Started") if err := killStressSerial(experimentsDetails.TargetContainer, pod.Name, pod.Namespace, experimentsDetails.ChaosKillCmd, clients); err != nil { log.Errorf("Error in Kill stress after abortion, err: %v", err) } + err := cerrors.Error{ErrorCode: cerrors.ErrorTypeExperimentAborted, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", pod.Name, pod.Namespace, experimentsDetails.TargetContainer), Reason: "experiment is aborted"} + failStep, errCode := cerrors.GetRootCauseAndErrorCode(err, string(chaosDetails.Phase)) + types.SetResultAfterCompletion(resultDetails, "Stopped", "Stopped", failStep, errCode) + if err := result.ChaosResult(chaosDetails, clients, resultDetails, "EOT"); err != nil { + log.Errorf("failed to update chaos result %s", err.Error()) + } log.Info("[Chaos]: Revert Completed") os.Exit(1) case <-endTime: @@ -152,7 +177,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai } } if err := killStressSerial(experimentsDetails.TargetContainer, pod.Name, pod.Namespace, experimentsDetails.ChaosKillCmd, clients); err != nil { - return err + return stacktrace.Propagate(err, "could not revert chaos") } } return nil @@ -162,7 +187,6 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList corev1.PodList, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { // creating err channel to receive the error from the go routine stressErr := make(chan error) - var err error // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { @@ -182,10 +206,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet } //Get the target container name of the application pod if !experimentsDetails.IsTargetContainerProvided { - experimentsDetails.TargetContainer, err = common.GetTargetContainer(pod.Namespace, pod.Name, clients) - if err != nil { - return errors.Errorf("unable to get the target container name, err: %v", err) - } + experimentsDetails.TargetContainer = 
pod.Spec.Containers[0].Name } log.InfoWithValues("[Chaos]: The Target application details", logrus.Fields{ @@ -209,19 +230,25 @@ loop: case err := <-stressErr: // skipping the execution, if received any error other than 137, while executing stress command and marked result as fail // it will ignore the error code 137(oom kill), it will skip further execution and marked the result as pass - // oom kill occurs if stor to be stressed exceed than the resource limit for the target container + // oom kill occurs if resource to be stressed exceed than the resource limit for the target container if err != nil { if strings.Contains(err.Error(), "137") { log.Warn("Chaos process OOM killed") return nil } - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Reason: fmt.Sprintf("failed to inject chaos: %s", err.Error())} } case <-signChan: log.Info("[Chaos]: Revert Started") if err := killStressParallel(experimentsDetails.TargetContainer, targetPodList, experimentsDetails.ChaosKillCmd, clients); err != nil { log.Errorf("Error in Kill stress after abortion, err: %v", err) } + err := cerrors.Error{ErrorCode: cerrors.ErrorTypeExperimentAborted, Reason: "experiment is aborted"} + failStep, errCode := cerrors.GetRootCauseAndErrorCode(err, string(chaosDetails.Phase)) + types.SetResultAfterCompletion(resultDetails, "Stopped", "Stopped", failStep, errCode) + if err := result.ChaosResult(chaosDetails, clients, resultDetails, "EOT"); err != nil { + log.Errorf("failed to update chaos result %s", err.Error()) + } log.Info("[Chaos]: Revert Completed") os.Exit(1) case <-endTime: @@ -230,44 +257,24 @@ loop: } } if err := killStressParallel(experimentsDetails.TargetContainer, targetPodList, experimentsDetails.ChaosKillCmd, clients); err != nil { - return err + return stacktrace.Propagate(err, "could not revert chaos") } return nil } -//PrepareChaos contains the chaos prepration and injection steps -func PrepareChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { - - //Waiting for the ramp time before chaos injection - if experimentsDetails.RampTime != 0 { - log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime) - common.WaitForDuration(experimentsDetails.RampTime) - } - //Starting the Fio stress experiment - if err := experimentExecution(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err - } - //Waiting for the ramp time after chaos injection - if experimentsDetails.RampTime != 0 { - log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime) - common.WaitForDuration(experimentsDetails.RampTime) - } - return nil -} - // killStressSerial function to kill a stress process running inside target container // Triggered by either timeout of chaos duration or termination of the experiment func killStressSerial(containerName, podName, namespace, KillCmd string, clients clients.ClientSets) error { - // It will contains all the pod & container details required for exec command + // It will contain all the pod & container details required for exec command execCommandDetails := litmusexec.PodDetails{} command := []string{"/bin/sh", "-c", KillCmd} litmusexec.SetExecCommandAttributes(&execCommandDetails, podName, containerName, namespace) - _, err := litmusexec.Exec(&execCommandDetails, clients, command) + out, err :=
litmusexec.Exec(&execCommandDetails, clients, command) if err != nil { - return errors.Errorf("Unable to kill stress process inside target container, err: %v", err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{podName: %s, namespace: %s}", podName, namespace), Reason: fmt.Sprintf("failed to revert chaos: %s", out)} } return nil } @@ -275,12 +282,14 @@ func killStressSerial(containerName, podName, namespace, KillCmd string, clients // killStressParallel function to kill all the stress process running inside target container // Triggered by either timeout of chaos duration or termination of the experiment func killStressParallel(containerName string, targetPodList corev1.PodList, KillCmd string, clients clients.ClientSets) error { - + var errList []string for _, pod := range targetPodList.Items { - if err := killStressSerial(containerName, pod.Name, pod.Namespace, KillCmd, clients); err != nil { - return err + errList = append(errList, err.Error()) } } + if len(errList) != 0 { + return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s]", strings.Join(errList, ","))} + } return nil } diff --git a/chaoslib/litmus/pod-memory-hog-exec/lib/pod-memory-hog-exec.go b/chaoslib/litmus/pod-memory-hog-exec/lib/pod-memory-hog-exec.go index 5ea2701b4..c08216468 100644 --- a/chaoslib/litmus/pod-memory-hog-exec/lib/pod-memory-hog-exec.go +++ b/chaoslib/litmus/pod-memory-hog-exec/lib/pod-memory-hog-exec.go @@ -2,6 +2,8 @@ package lib import ( "fmt" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/palantir/stacktrace" "os" "os/signal" "strconv" @@ -18,13 +20,37 @@ import ( "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" litmusexec "github.com/litmuschaos/litmus-go/pkg/utils/exec" - "github.com/pkg/errors" "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" ) var inject chan os.Signal +//PrepareMemoryExecStress contains the chaos preparation and injection steps +func PrepareMemoryExecStress(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + + // inject channel is used to transmit signal notifications. + inject = make(chan os.Signal, 1) + // Catch and relay certain signal(s) to inject channel. + signal.Notify(inject, os.Interrupt, syscall.SIGTERM) + + //Waiting for the ramp time before chaos injection + if experimentsDetails.RampTime != 0 { + log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime) + common.WaitForDuration(experimentsDetails.RampTime) + } + //Starting the Memory stress experiment + if err := experimentMemory(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + return stacktrace.Propagate(err, "could not stress memory") + } + //Waiting for the ramp time after chaos injection + if experimentsDetails.RampTime != 0 { + log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime) + common.WaitForDuration(experimentsDetails.RampTime) + } + return nil +} + // stressMemory Uses the REST API to exec into the target container of the target pod // The function will be constantly increasing the Memory utilisation until it reaches the maximum available or allowed number. 
// Using the TOTAL_CHAOS_DURATION we will need to specify for how long this experiment will last @@ -50,11 +76,12 @@ func experimentMemory(experimentsDetails *experimentTypes.ExperimentDetails, cli // Get the target pod details for the chaos execution // if the target pod is not defined it will derive the random target pod list using pod affected percentage if experimentsDetails.TargetPods == "" && chaosDetails.AppDetail == nil { - return errors.Errorf("please provide one of the appLabel or TARGET_PODS") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "provide one of the appLabel or TARGET_PODS"} } + targetPodList, err := common.GetPodList(experimentsDetails.TargetPods, experimentsDetails.PodsAffectedPerc, clients, chaosDetails) if err != nil { - return err + return stacktrace.Propagate(err, "could not get target pods") } podNames := []string{} @@ -63,18 +90,18 @@ func experimentMemory(experimentsDetails *experimentTypes.ExperimentDetails, cli } log.Infof("Target pods list for chaos, %v", podNames) - experimentsDetails.IsTargetContainerProvided = (experimentsDetails.TargetContainer != "") + experimentsDetails.IsTargetContainerProvided = experimentsDetails.TargetContainer != "" switch strings.ToLower(experimentsDetails.Sequence) { case "serial": if err = injectChaosInSerialMode(experimentsDetails, targetPodList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": if err = injectChaosInParallelMode(experimentsDetails, targetPodList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.Sequence) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)} } return nil @@ -83,7 +110,6 @@ func experimentMemory(experimentsDetails *experimentTypes.ExperimentDetails, cli // injectChaosInSerialMode stressed the memory of all target application serially (one by one) func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList corev1.PodList, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { - var err error // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { @@ -118,10 +144,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai //Get the target container name of the application pod if !experimentsDetails.IsTargetContainerProvided { - experimentsDetails.TargetContainer, err = common.GetTargetContainer(pod.Namespace, pod.Name, clients) - if err != nil { - return errors.Errorf("unable to get the target container name, err: %v", err) - } + experimentsDetails.TargetContainer = pod.Spec.Containers[0].Name } log.InfoWithValues("[Chaos]: The Target application details", logrus.Fields{ @@ -148,7 +171,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai log.Warn("Chaos process OOM killed") return nil } - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("podName: %s, namespace: %s, container: %s", pod.Name, pod.Namespace, 
experimentsDetails.TargetContainer), Reason: fmt.Sprintf("failed to stress memory of target pod: %s", err.Error())} } case <-signChan: log.Info("[Chaos]: Revert Started") @@ -156,9 +179,12 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai log.Errorf("Error in Kill stress after abortion, err: %v", err) } // updating the chaosresult after stopped - failStep := "Chaos injection stopped!" - types.SetResultAfterCompletion(resultDetails, "Stopped", "Stopped", failStep) - result.ChaosResult(chaosDetails, clients, resultDetails, "EOT") + err := cerrors.Error{ErrorCode: cerrors.ErrorTypeExperimentAborted, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", pod.Name, pod.Namespace, experimentsDetails.TargetContainer), Reason: "experiment is aborted"} + failStep, errCode := cerrors.GetRootCauseAndErrorCode(err, string(chaosDetails.Phase)) + types.SetResultAfterCompletion(resultDetails, "Stopped", "Stopped", failStep, errCode) + if err := result.ChaosResult(chaosDetails, clients, resultDetails, "EOT"); err != nil { + log.Errorf("failed to update chaos result %s", err.Error()) + } log.Info("[Chaos]: Revert Completed") os.Exit(1) case <-endTime: @@ -168,7 +194,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai } } if err := killStressMemorySerial(experimentsDetails.TargetContainer, pod.Name, pod.Namespace, experimentsDetails.ChaosKillCmd, clients, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not revert memory stress") } } } @@ -179,7 +205,6 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList corev1.PodList, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { // creating err channel to receive the error from the go routine stressErr := make(chan error) - var err error // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { @@ -212,10 +237,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet //Get the target container name of the application pod //It checks the empty target container for the first iteration only if !experimentsDetails.IsTargetContainerProvided { - experimentsDetails.TargetContainer, err = common.GetTargetContainer(pod.Namespace, pod.Name, clients) - if err != nil { - return errors.Errorf("unable to get the target container name, err: %v", err) - } + experimentsDetails.TargetContainer = pod.Spec.Containers[0].Name } log.InfoWithValues("[Chaos]: The Target application details", logrus.Fields{ @@ -243,13 +265,20 @@ loop: log.Warn("Chaos process OOM killed") return nil } - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Reason: fmt.Sprintf("failed to stress memory of target pod: %s", err.Error())} } case <-signChan: log.Info("[Chaos]: Revert Started") if err := killStressMemoryParallel(experimentsDetails.TargetContainer, targetPodList, experimentsDetails.ChaosKillCmd, clients, chaosDetails); err != nil { log.Errorf("Error in Kill stress after abortion, err: %v", err) } + // updating the chaosresult after stopped + err := cerrors.Error{ErrorCode: cerrors.ErrorTypeExperimentAborted, Reason: "experiment is aborted"} + failStep, errCode := cerrors.GetRootCauseAndErrorCode(err, 
string(chaosDetails.Phase)) + types.SetResultAfterCompletion(resultDetails, "Stopped", "Stopped", failStep, errCode) + if err := result.ChaosResult(chaosDetails, clients, resultDetails, "EOT"); err != nil { + log.Errorf("failed to update chaos result %s", err.Error()) + } log.Info("[Chaos]: Revert Completed") os.Exit(1) case <-endTime: @@ -260,31 +289,6 @@ loop: return killStressMemoryParallel(experimentsDetails.TargetContainer, targetPodList, experimentsDetails.ChaosKillCmd, clients, chaosDetails) } -//PrepareMemoryExecStress contains the chaos prepration and injection steps -func PrepareMemoryExecStress(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { - - // inject channel is used to transmit signal notifications. - inject = make(chan os.Signal, 1) - // Catch and relay certain signal(s) to inject channel. - signal.Notify(inject, os.Interrupt, syscall.SIGTERM) - - //Waiting for the ramp time before chaos injection - if experimentsDetails.RampTime != 0 { - log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime) - common.WaitForDuration(experimentsDetails.RampTime) - } - //Starting the Memory stress experiment - if err := experimentMemory(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err - } - //Waiting for the ramp time after chaos injection - if experimentsDetails.RampTime != 0 { - log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime) - common.WaitForDuration(experimentsDetails.RampTime) - } - return nil -} - // killStressMemorySerial function to kill a stress process running inside target container // Triggered by either timeout of chaos duration or termination of the experiment func killStressMemorySerial(containerName, podName, namespace, memFreeCmd string, clients clients.ClientSets, chaosDetails *types.ChaosDetails) error { @@ -294,9 +298,9 @@ func killStressMemorySerial(containerName, podName, namespace, memFreeCmd string command := []string{"/bin/sh", "-c", memFreeCmd} litmusexec.SetExecCommandAttributes(&execCommandDetails, podName, containerName, namespace) - _, err := litmusexec.Exec(&execCommandDetails, clients, command) + out, err := litmusexec.Exec(&execCommandDetails, clients, command) if err != nil { - return errors.Errorf("Unable to kill stress process inside target container, err: %v", err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{podName: %s, namespace: %s}", podName, namespace), Reason: fmt.Sprintf("failed to revert chaos: %s", out)} } common.SetTargets(podName, "reverted", "pod", chaosDetails) return nil @@ -305,12 +309,14 @@ func killStressMemorySerial(containerName, podName, namespace, memFreeCmd string // killStressMemoryParallel function to kill all the stress process running inside target container // Triggered by either timeout of chaos duration or termination of the experiment func killStressMemoryParallel(containerName string, targetPodList corev1.PodList, memFreeCmd string, clients clients.ClientSets, chaosDetails *types.ChaosDetails) error { - + var errList []string for _, pod := range targetPodList.Items { - if err := killStressMemorySerial(containerName, pod.Name, pod.Namespace, memFreeCmd, clients, chaosDetails); err != nil { - return err + errList = append(errList, err.Error()) } } + if len(errList) != 0 { + return 
cerrors.PreserveError{ErrString: fmt.Sprintf("[%s]", strings.Join(errList, ","))} + } return nil } diff --git a/chaoslib/litmus/pod-network-partition/lib/network-policy.go b/chaoslib/litmus/pod-network-partition/lib/network-policy.go index f8d5c7ff3..786e32162 100644 --- a/chaoslib/litmus/pod-network-partition/lib/network-policy.go +++ b/chaoslib/litmus/pod-network-partition/lib/network-policy.go @@ -1,12 +1,14 @@ package lib import ( + "fmt" + "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/palantir/stacktrace" "strings" network_chaos "github.com/litmuschaos/litmus-go/chaoslib/litmus/network-chaos/lib" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-network-partition/types" - "github.com/pkg/errors" "gopkg.in/yaml.v2" corev1 "k8s.io/api/core/v1" networkv1 "k8s.io/api/networking/v1" @@ -52,12 +54,12 @@ func (np *NetworkPolicy) getNetworkPolicyDetails(experimentsDetails *experimentT // sets the ports for the traffic control if err := np.setPort(experimentsDetails.PORTS); err != nil { - return err + return stacktrace.Propagate(err, "could not set port") } // sets the destination ips for which the traffic should be blocked if err := np.setExceptIPs(experimentsDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not set ips") } // sets the egress traffic rules @@ -138,11 +140,11 @@ func (np *NetworkPolicy) setNamespaceSelector(nsLabel string) *NetworkPolicy { // setPort sets all the protocols and ports func (np *NetworkPolicy) setPort(p string) error { - ports := []networkv1.NetworkPolicyPort{} + var ports []networkv1.NetworkPolicyPort var port Port // unmarshal the protocols and ports from the env if err := yaml.Unmarshal([]byte(strings.TrimSpace(parseCommand(p))), &port); err != nil { - return errors.Errorf("Unable to unmarshal, err: %v", err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("failed to unmarshal ports: %s", err.Error())} } // sets all the tcp ports @@ -182,7 +184,7 @@ func (np *NetworkPolicy) setExceptIPs(experimentsDetails *experimentTypes.Experi // get all the target ips destinationIPs, err := network_chaos.GetTargetIps(experimentsDetails.DestinationIPs, experimentsDetails.DestinationHosts, clients.ClientSets{}, false) if err != nil { - return err + return stacktrace.Propagate(err, "could not get destination ips") } ips := strings.Split(destinationIPs, ",") diff --git a/chaoslib/litmus/pod-network-partition/lib/pod-network-partition.go b/chaoslib/litmus/pod-network-partition/lib/pod-network-partition.go index e3c3b48dc..b76a96ebf 100644 --- a/chaoslib/litmus/pod-network-partition/lib/pod-network-partition.go +++ b/chaoslib/litmus/pod-network-partition/lib/pod-network-partition.go @@ -2,8 +2,12 @@ package lib import ( "context" + "fmt" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/palantir/stacktrace" "os" "os/signal" + "strings" "syscall" "time" @@ -15,7 +19,6 @@ import ( "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" "github.com/litmuschaos/litmus-go/pkg/utils/retry" - "github.com/pkg/errors" "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" networkv1 "k8s.io/api/networking/v1" @@ -41,12 +44,13 @@ func PrepareAndInjectChaos(experimentsDetails *experimentTypes.ExperimentDetails // validate the appLabels if chaosDetails.AppDetail == nil { - return errors.Errorf("please provide the appLabel") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: 
"provide the appLabel"} } + // Get the target pod details for the chaos execution targetPodList, err := common.GetPodList("", 100, clients, chaosDetails) if err != nil { - return err + return stacktrace.Propagate(err, "could not get target pods") } podNames := []string{} @@ -67,7 +71,7 @@ func PrepareAndInjectChaos(experimentsDetails *experimentTypes.ExperimentDetails // collect all the data for the network policy np := initialize() if err := np.getNetworkPolicyDetails(experimentsDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not get network policy details") } //DISPLAY THE NETWORK POLICY DETAILS @@ -97,7 +101,7 @@ func PrepareAndInjectChaos(experimentsDetails *experimentTypes.ExperimentDetails default: // creating the network policy to block the traffic if err := createNetworkPolicy(experimentsDetails, clients, np, runID); err != nil { - return err + return stacktrace.Propagate(err, "could not create network policy") } // updating chaos status to injected for the target pods for _, pod := range targetPodList.Items { @@ -106,8 +110,8 @@ func PrepareAndInjectChaos(experimentsDetails *experimentTypes.ExperimentDetails } // verify the presence of network policy inside cluster - if err := checkExistanceOfPolicy(experimentsDetails, clients, experimentsDetails.Timeout, experimentsDetails.Delay, runID); err != nil { - return err + if err := checkExistenceOfPolicy(experimentsDetails, clients, experimentsDetails.Timeout, experimentsDetails.Delay, runID); err != nil { + return stacktrace.Propagate(err, "could not check existence of network policy") } log.Infof("[Wait]: Wait for %v chaos duration", experimentsDetails.ChaosDuration) @@ -115,7 +119,7 @@ func PrepareAndInjectChaos(experimentsDetails *experimentTypes.ExperimentDetails // deleting the network policy after chaos duration over if err := deleteNetworkPolicy(experimentsDetails, clients, &targetPodList, chaosDetails, experimentsDetails.Timeout, experimentsDetails.Delay, runID); err != nil { - return err + return stacktrace.Propagate(err, "could not delete network policy") } // updating chaos status to reverted for the target pods @@ -157,7 +161,10 @@ func createNetworkPolicy(experimentsDetails *experimentTypes.ExperimentDetails, } _, err := clients.KubeClient.NetworkingV1().NetworkPolicies(experimentsDetails.AppNS).Create(context.Background(), np, v1.CreateOptions{}) - return err + if err != nil { + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Reason: fmt.Sprintf("failed to create network policy: %s", err.Error())} + } + return nil } // deleteNetworkPolicy deletes the network policy and wait until the network policy deleted completely @@ -165,7 +172,7 @@ func deleteNetworkPolicy(experimentsDetails *experimentTypes.ExperimentDetails, name := experimentsDetails.ExperimentName + "-np-" + runID labels := "name=" + experimentsDetails.ExperimentName + "-np-" + runID if err := clients.KubeClient.NetworkingV1().NetworkPolicies(experimentsDetails.AppNS).Delete(context.Background(), name, v1.DeleteOptions{}); err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{name: %s, namespace: %s}", name, experimentsDetails.AppNS), Reason: fmt.Sprintf("failed to delete network policy: %s", err.Error())} } err := retry. @@ -173,8 +180,10 @@ func deleteNetworkPolicy(experimentsDetails *experimentTypes.ExperimentDetails, Wait(time.Duration(delay) * time.Second). 
Try(func(attempt uint) error { npList, err := clients.KubeClient.NetworkingV1().NetworkPolicies(experimentsDetails.AppNS).List(context.Background(), v1.ListOptions{LabelSelector: labels}) - if err != nil || len(npList.Items) != 0 { - return errors.Errorf("Unable to delete the network policy, err: %v", err) + if err != nil { + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{labels: %s, namespace: %s}", labels, experimentsDetails.AppNS), Reason: fmt.Sprintf("failed to list network policies: %s", err.Error())} + } else if len(npList.Items) != 0 { + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{labels: %s, namespace: %s}", labels, experimentsDetails.AppNS), Reason: "network policies are not deleted within timeout"} } return nil }) @@ -189,8 +198,8 @@ func deleteNetworkPolicy(experimentsDetails *experimentTypes.ExperimentDetails, return nil } -// checkExistanceOfPolicy validate the presence of network policy inside the application namespace -func checkExistanceOfPolicy(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, timeout, delay int, runID string) error { +// checkExistenceOfPolicy validate the presence of network policy inside the application namespace +func checkExistenceOfPolicy(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, timeout, delay int, runID string) error { labels := "name=" + experimentsDetails.ExperimentName + "-np-" + runID return retry. @@ -198,8 +207,10 @@ func checkExistanceOfPolicy(experimentsDetails *experimentTypes.ExperimentDetail Wait(time.Duration(delay) * time.Second). Try(func(attempt uint) error { npList, err := clients.KubeClient.NetworkingV1().NetworkPolicies(experimentsDetails.AppNS).List(context.Background(), v1.ListOptions{LabelSelector: labels}) - if err != nil || len(npList.Items) == 0 { - return errors.Errorf("no network policy found, err: %v", err) + if err != nil { + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{labels: %s, namespace: %s}", labels, experimentsDetails.AppNS), Reason: fmt.Sprintf("failed to list network policies: %s", err.Error())} + } else if len(npList.Items) == 0 { + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{labels: %s, namespace: %s}", labels, experimentsDetails.AppNS), Reason: "no network policy found with matching labels"} } return nil }) @@ -215,8 +226,13 @@ func abortWatcher(experimentsDetails *experimentTypes.ExperimentDetails, clients // retry thrice for the chaos revert retry := 3 for retry > 0 { - if err := checkExistanceOfPolicy(experimentsDetails, clients, 2, 1, runID); err != nil { - log.Infof("no active network policy found, err: %v", err) + if err := checkExistenceOfPolicy(experimentsDetails, clients, 2, 1, runID); err != nil { + if error, ok := err.(cerrors.Error); ok { + if strings.Contains(error.Reason, "no network policy found with matching labels") { + break + } + } + log.Infof("no active network policy found, err: %v", err.Error()) retry-- continue } @@ -224,10 +240,12 @@ func abortWatcher(experimentsDetails *experimentTypes.ExperimentDetails, clients if err := deleteNetworkPolicy(experimentsDetails, clients, targetPodList, chaosDetails, 2, 1, runID); err != nil { log.Errorf("unable to delete network policy, err: %v", err) } + retry-- } // updating the chaosresult after stopped - failStep := "Chaos injection stopped!" 
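// The abort watcher above distinguishes "the policy is already gone, nothing to
// revert" from "the existence check itself failed" by inspecting the typed
// error's Reason. A condensed, self-contained illustration of that pattern;
// checkPolicy and its reason string are stand-ins, not the experiment's real identifiers.
package main

import (
	"errors"
	"fmt"
	"strings"
)

type typedError struct{ Reason string }

func (e typedError) Error() string { return e.Reason }

// checkPolicy stands in for checkExistenceOfPolicy.
func checkPolicy() error {
	return typedError{Reason: "no network policy found with matching labels"}
}

func main() {
	if err := checkPolicy(); err != nil {
		var te typedError
		if errors.As(err, &te) && strings.Contains(te.Reason, "no network policy found") {
			fmt.Println("policy already deleted, skipping revert")
			return
		}
		fmt.Println("existence check failed, will retry revert:", err)
	}
}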
- types.SetResultAfterCompletion(resultDetails, "Stopped", "Stopped", failStep) + err := cerrors.Error{ErrorCode: cerrors.ErrorTypeExperimentAborted, Reason: "experiment is aborted"} + failStep, errCode := cerrors.GetRootCauseAndErrorCode(err, string(chaosDetails.Phase)) + types.SetResultAfterCompletion(resultDetails, "Stopped", "Stopped", failStep, errCode) result.ChaosResult(chaosDetails, clients, resultDetails, "EOT") log.Info("Chaos Revert Completed") os.Exit(0) diff --git a/chaoslib/litmus/redfish-node-restart/lib/redfish-node-restart.go b/chaoslib/litmus/redfish-node-restart/lib/redfish-node-restart.go index a7f39cd75..659179b55 100644 --- a/chaoslib/litmus/redfish-node-restart/lib/redfish-node-restart.go +++ b/chaoslib/litmus/redfish-node-restart/lib/redfish-node-restart.go @@ -12,15 +12,16 @@ import ( "github.com/litmuschaos/litmus-go/pkg/probe" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" + "github.com/palantir/stacktrace" ) -//injectChaos initiates node restart chaos on the target node +// injectChaos initiates node restart chaos on the target node func injectChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets) error { URL := fmt.Sprintf("https://%v/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset", experimentsDetails.IPMIIP) return redfishLib.RebootNode(URL, experimentsDetails.User, experimentsDetails.Password) } -//experimentExecution function orchestrates the experiment by calling the injectChaos function +// experimentExecution function orchestrates the experiment by calling the injectChaos function func experimentExecution(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { // run the probes during chaos @@ -37,15 +38,15 @@ func experimentExecution(experimentsDetails *experimentTypes.ExperimentDetails, } if err := injectChaos(experimentsDetails, clients); err != nil { - return err + return stacktrace.Propagate(err, "chaos injection failed") } - log.Infof("[Chaos]:Waiting for: %vs", experimentsDetails.ChaosDuration) + log.Infof("[Chaos]: Waiting for: %vs", experimentsDetails.ChaosDuration) time.Sleep(time.Duration(experimentsDetails.ChaosDuration) * time.Second) return nil } -//PrepareChaos contains the chaos prepration and injection steps +// PrepareChaos contains the chaos prepration and injection steps func PrepareChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { //Waiting for the ramp time before chaos injection diff --git a/chaoslib/litmus/spring-boot-chaos/lib/spring-boot-chaos.go b/chaoslib/litmus/spring-boot-chaos/lib/spring-boot-chaos.go index b861a4aea..cb5bbb485 100644 --- a/chaoslib/litmus/spring-boot-chaos/lib/spring-boot-chaos.go +++ b/chaoslib/litmus/spring-boot-chaos/lib/spring-boot-chaos.go @@ -4,7 +4,6 @@ import ( "bytes" "encoding/json" "fmt" - corev1 "k8s.io/api/core/v1" "net/http" "os" "os/signal" @@ -12,6 +11,10 @@ import ( "syscall" "time" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/palantir/stacktrace" + corev1 "k8s.io/api/core/v1" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" "github.com/litmuschaos/litmus-go/pkg/log" @@ -20,7 +23,6 @@ import ( experimentTypes 
"github.com/litmuschaos/litmus-go/pkg/spring-boot/spring-boot-chaos/types" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -39,7 +41,7 @@ func SetTargetPodList(experimentsDetails *experimentTypes.ExperimentDetails, cli var err error if experimentsDetails.TargetPods == "" && chaosDetails.AppDetail == nil { - return errors.Errorf("please provide one of the appLabel or TARGET_PODS") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "please provide one of the appLabel or TARGET_PODS"} } if experimentsDetails.TargetPodList, err = common.GetPodList(experimentsDetails.TargetPods, experimentsDetails.PodsAffectedPerc, clients, chaosDetails); err != nil { return err @@ -68,14 +70,14 @@ func PrepareChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients switch strings.ToLower(experimentsDetails.Sequence) { case "serial": if err := injectChaosInSerialMode(experimentsDetails, clients, chaosDetails, eventsDetails, resultDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": if err := injectChaosInParallelMode(experimentsDetails, clients, chaosDetails, eventsDetails, resultDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.Sequence) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)} } // Waiting for the ramp time after chaos injection @@ -91,25 +93,30 @@ func PrepareChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients func CheckChaosMonkey(chaosMonkeyPort string, chaosMonkeyPath string, targetPods corev1.PodList) (bool, error) { hasErrors := false + targetPodNames := []string{} + for _, pod := range targetPods.Items { + + targetPodNames = append(targetPodNames, pod.Name) + endpoint := "http://" + pod.Status.PodIP + ":" + chaosMonkeyPort + chaosMonkeyPath log.Infof("[Check]: Checking pod: %v (endpoint: %v)", pod.Name, endpoint) resp, err := http.Get(endpoint) if err != nil { - log.Errorf("failed to request chaos monkey endpoint on pod %v (err: %v)", pod.Name, resp.StatusCode) + log.Errorf("failed to request chaos monkey endpoint on pod %s, %s", pod.Name, err.Error()) hasErrors = true continue } if resp.StatusCode != 200 { - log.Errorf("failed to get chaos monkey endpoint on pod %v (status: %v)", pod.Name, resp.StatusCode) + log.Errorf("failed to get chaos monkey endpoint on pod %s (status: %d)", pod.Name, resp.StatusCode) hasErrors = true } } if hasErrors { - return false, errors.Errorf("failed to check chaos moonkey on at least one pod, check logs for details") + return false, cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{podNames: %s}", targetPodNames), Reason: "failed to check chaos monkey on at least one pod, check logs for details"} } return true, nil } @@ -123,7 +130,7 @@ func enableChaosMonkey(chaosMonkeyPort string, chaosMonkeyPath string, pod corev } if resp.StatusCode != 200 { - return errors.Errorf("failed to enable chaos monkey endpoint on pod %v (status: %v)", pod.Name, resp.StatusCode) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: fmt.Sprintf("failed to enable chaos monkey endpoint (status: %d)", 
resp.StatusCode)} } return nil @@ -134,16 +141,16 @@ func setChaosMonkeyWatchers(chaosMonkeyPort string, chaosMonkeyPath string, watc jsonValue, err := json.Marshal(watchers) if err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: fmt.Sprintf("failed to marshal chaos monkey watchers, %s", err.Error())} } resp, err := http.Post("http://"+pod.Status.PodIP+":"+chaosMonkeyPort+chaosMonkeyPath+"/watchers", "application/json", bytes.NewBuffer(jsonValue)) if err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: fmt.Sprintf("failed to call the chaos monkey api to set watchers, %s", err.Error())} } if resp.StatusCode != 200 { - return errors.Errorf("failed to set assault on pod %v (status: %v)", pod.Name, resp.StatusCode) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: fmt.Sprintf("failed to set assault (status: %d)", resp.StatusCode)} } return nil @@ -156,11 +163,11 @@ func startAssault(chaosMonkeyPort string, chaosMonkeyPath string, assault []byte log.Infof("[Chaos]: Activating Chaos Monkey assault on pod: %v", pod.Name) resp, err := http.Post("http://"+pod.Status.PodIP+":"+chaosMonkeyPort+chaosMonkeyPath+"/assaults/runtime/attack", "", nil) if err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: fmt.Sprintf("failed to call the chaos monkey api to start assault %s", err.Error())} } if resp.StatusCode != 200 { - return errors.Errorf("failed to activate runtime attack on pod %v (status: %v)", pod.Name, resp.StatusCode) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: fmt.Sprintf("failed to activate runtime attack (status: %d)", resp.StatusCode)} } return nil } @@ -170,34 +177,34 @@ func setChaosMonkeyAssault(chaosMonkeyPort string, chaosMonkeyPath string, assau resp, err := http.Post("http://"+pod.Status.PodIP+":"+chaosMonkeyPort+chaosMonkeyPath+"/assaults", "application/json", bytes.NewBuffer(assault)) if err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: fmt.Sprintf("failed to call the chaos monkey api to set assault, %s", err.Error())} } if resp.StatusCode != 200 { - return errors.Errorf("failed to set assault on pod %v (status: %v)", pod.Name, resp.StatusCode) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: fmt.Sprintf("failed to set assault (status: %d)", resp.StatusCode)} } return nil } // disableChaosMonkey disables chaos monkey on selected pods func disableChaosMonkey(chaosMonkeyPort string, chaosMonkeyPath string, pod corev1.Pod) error { - log.Infof("[Chaos]: disabling assaults on pod %v", pod.Name) + log.Infof("[Chaos]: disabling assaults on pod %s", pod.Name) jsonValue, err := json.Marshal(revertAssault) if err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: fmt.Sprintf("failed to marshal chaos monkey 
revert-chaos watchers, %s", err.Error())} } if err := setChaosMonkeyAssault(chaosMonkeyPort, chaosMonkeyPath, jsonValue, pod); err != nil { return err } - log.Infof("[Chaos]: disabling chaos monkey on pod %v", pod.Name) + log.Infof("[Chaos]: disabling chaos monkey on pod %s", pod.Name) resp, err := http.Post("http://"+pod.Status.PodIP+":"+chaosMonkeyPort+chaosMonkeyPath+"/disable", "", nil) if err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: fmt.Sprintf("failed to call the chaos monkey api to disable assault, %s", err.Error())} } if resp.StatusCode != 200 { - return errors.Errorf("failed to disable chaos monkey endpoint on pod %v (status: %v)", pod.Name, resp.StatusCode) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: fmt.Sprintf("failed to disable chaos monkey endpoint (status: %d)", resp.StatusCode)} } return nil @@ -269,7 +276,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai } // updating the chaosresult after stopped failStep := "Chaos injection stopped!" - types.SetResultAfterCompletion(resultDetails, "Stopped", "Stopped", failStep) + types.SetResultAfterCompletion(resultDetails, "Stopped", "Stopped", failStep, cerrors.ErrorTypeExperimentAborted) result.ChaosResult(chaosDetails, clients, resultDetails, "EOT") log.Info("[Chaos]: Revert Completed") os.Exit(1) @@ -281,7 +288,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai } if err := disableChaosMonkey(experimentsDetails.ChaosMonkeyPort, experimentsDetails.ChaosMonkeyPath, pod); err != nil { - return fmt.Errorf("error in disabling chaos monkey, err: %v", err) + return err } common.SetTargets(pod.Name, "reverted", "pod", chaosDetails) @@ -327,16 +334,17 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet }) if err := setChaosMonkeyWatchers(experimentsDetails.ChaosMonkeyPort, experimentsDetails.ChaosMonkeyPath, experimentsDetails.ChaosMonkeyWatchers, pod); err != nil { - return errors.Errorf("[Chaos]: Failed to set watchers, err: %v ", err) + log.Errorf("[Chaos]: Failed to set watchers, err: %v", err) + return err } if err := startAssault(experimentsDetails.ChaosMonkeyPort, experimentsDetails.ChaosMonkeyPath, experimentsDetails.ChaosMonkeyAssault, pod); err != nil { - log.Errorf("[Chaos]: Failed to set assault, err: %v ", err) + log.Errorf("[Chaos]: Failed to set assault, err: %v", err) return err } if err := enableChaosMonkey(experimentsDetails.ChaosMonkeyPort, experimentsDetails.ChaosMonkeyPath, pod); err != nil { - log.Errorf("[Chaos]: Failed to enable chaos, err: %v ", err) + log.Errorf("[Chaos]: Failed to enable chaos, err: %v", err) return err } common.SetTargets(pod.Name, "injected", "pod", chaosDetails) @@ -358,7 +366,7 @@ loop: } // updating the chaosresult after stopped failStep := "Chaos injection stopped!" 
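// The spring-boot helpers above share one shape: POST to a Chaos Monkey
// endpoint, then treat transport errors and non-200 responses as separate
// failures. A condensed sketch of that shape; postAndCheck and the example
// URL are hypothetical, not identifiers from this repository.
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

// postAndCheck performs the POST and converts a bad status code into an error.
func postAndCheck(url, contentType string, body []byte) error {
	resp, err := http.Post(url, contentType, bytes.NewBuffer(body))
	if err != nil {
		return fmt.Errorf("failed to call the chaos monkey api: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("chaos monkey returned unexpected status: %d", resp.StatusCode)
	}
	return nil
}

func main() {
	if err := postAndCheck("http://10.0.0.12:8080/actuator/chaosmonkey/enable", "", nil); err != nil {
		fmt.Println("enable failed:", err)
	}
}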
- types.SetResultAfterCompletion(resultDetails, "Stopped", "Stopped", failStep) + types.SetResultAfterCompletion(resultDetails, "Stopped", "Stopped", failStep, cerrors.ErrorTypeExperimentAborted) result.ChaosResult(chaosDetails, clients, resultDetails, "EOT") log.Info("[Chaos]: Revert Completed") os.Exit(1) @@ -379,7 +387,7 @@ loop: } if len(errorList) != 0 { - return fmt.Errorf("error in disabling chaos monkey, err: %v", strings.Join(errorList, ", ")) + return cerrors.PreserveError{ErrString: fmt.Sprintf("error in disabling chaos monkey, [%s]", strings.Join(errorList, ","))} } return nil } diff --git a/chaoslib/litmus/stress-chaos/helper/stress-helper.go b/chaoslib/litmus/stress-chaos/helper/stress-helper.go index aadd5259c..1846d0197 100644 --- a/chaoslib/litmus/stress-chaos/helper/stress-helper.go +++ b/chaoslib/litmus/stress-chaos/helper/stress-helper.go @@ -4,6 +4,8 @@ import ( "bufio" "bytes" "fmt" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/palantir/stacktrace" "io" "os" "os/exec" @@ -72,6 +74,7 @@ func Helper(clients clients.ClientSets) { // Intialise the chaos attributes types.InitialiseChaosVariables(&chaosDetails) + chaosDetails.Phase = types.ChaosInjectPhase // Intialise Chaos Result Parameters types.SetResultAttributes(&resultDetails, chaosDetails) @@ -80,6 +83,10 @@ func Helper(clients clients.ClientSets) { result.SetResultUID(&resultDetails, clients, &chaosDetails) if err := prepareStressChaos(&experimentsDetails, clients, &eventsDetails, &chaosDetails, &resultDetails); err != nil { + // update failstep inside chaosresult + if resultErr := result.UpdateFailedStepFromHelper(&resultDetails, &chaosDetails, clients, err); resultErr != nil { + log.Fatalf("helper pod failed, err: %v, resultErr: %v", err, resultErr) + } log.Fatalf("helper pod failed, err: %v", err) } } @@ -89,13 +96,13 @@ func prepareStressChaos(experimentsDetails *experimentTypes.ExperimentDetails, c // get stressors in list format stressorList := prepareStressor(experimentsDetails) if len(stressorList) == 0 { - return errors.Errorf("fail to prepare stressor for %v experiment", experimentsDetails.ExperimentName) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: chaosDetails.ChaosPodName, Reason: "fail to prepare stressors"} } stressors := strings.Join(stressorList, " ") - targetList, err := common.ParseTargets() + targetList, err := common.ParseTargets(chaosDetails.ChaosPodName) if err != nil { - return err + return stacktrace.Propagate(err, "could not parse targets") } var targets []targetDetails @@ -105,22 +112,23 @@ func prepareStressChaos(experimentsDetails *experimentTypes.ExperimentDetails, c Name: t.Name, Namespace: t.Namespace, TargetContainer: t.TargetContainer, + Source: chaosDetails.ChaosPodName, } - td.ContainerId, err = common.GetContainerID(td.Namespace, td.Name, td.TargetContainer, clients) + td.ContainerId, err = common.GetContainerID(td.Namespace, td.Name, td.TargetContainer, clients, td.Source) if err != nil { - return err + return stacktrace.Propagate(err, "could not get container id") } // extract out the pid of the target container - td.Pid, err = common.GetPID(experimentsDetails.ContainerRuntime, td.ContainerId, experimentsDetails.SocketPath) + td.Pid, err = common.GetPID(experimentsDetails.ContainerRuntime, td.ContainerId, experimentsDetails.SocketPath, td.Source) if err != nil { - return err + return stacktrace.Propagate(err, "could not get container pid") } - td.CGroupManager, err = getCGroupManager(td.Pid, td.ContainerId) + td.CGroupManager, err = 
getCGroupManager(td) if err != nil { - return errors.Errorf("fail to get the cgroup manager, err: %v", err) + return stacktrace.Propagate(err, "could not get cgroup manager") } targets = append(targets, td) } @@ -140,14 +148,14 @@ func prepareStressChaos(experimentsDetails *experimentTypes.ExperimentDetails, c for index, t := range targets { targets[index].Cmd, err = injectChaos(t, stressors) if err != nil { - return err + return stacktrace.Propagate(err, "could not inject chaos") } log.Infof("successfully injected chaos on target: {name: %s, namespace: %v, container: %v}", t.Name, t.Namespace, t.TargetContainer) if err = result.AnnotateChaosResult(resultDetails.Name, chaosDetails.ChaosNamespace, "injected", "pod", t.Name); err != nil { if revertErr := terminateProcess(t); revertErr != nil { - return fmt.Errorf("failed to revert and annotate the result, err: %v", fmt.Sprintf("%s, %s", err.Error(), revertErr.Error())) + return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s,%s]", stacktrace.RootCause(err).Error(), stacktrace.RootCause(revertErr).Error())} } - return err + return stacktrace.Propagate(err, "could not annotate chaosresult") } } @@ -201,7 +209,7 @@ func prepareStressChaos(experimentsDetails *experimentTypes.ExperimentDetails, c } } if len(errList) != 0 { - return fmt.Errorf("err: %v", strings.Join(errList, ", ")) + return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s]", strings.Join(errList, ","))} } case err := <-done: if err != nil { @@ -211,10 +219,10 @@ func prepareStressChaos(experimentsDetails *experimentTypes.ExperimentDetails, c if status.Signaled() && status.Signal() == syscall.SIGKILL { // wait for the completion of abort handler time.Sleep(10 * time.Second) - return errors.Errorf("process stopped with SIGTERM signal") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeExperimentAborted, Source: chaosDetails.ChaosPodName, Reason: fmt.Sprintf("process stopped with SIGTERM signal")} } } - return errors.Errorf("process exited before the actual cleanup, err: %v", err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Source: chaosDetails.ChaosPodName, Reason: err.Error()} } log.Info("[Info]: Reverting Chaos") var errList []string @@ -223,12 +231,13 @@ func prepareStressChaos(experimentsDetails *experimentTypes.ExperimentDetails, c errList = append(errList, err.Error()) continue } + log.Infof("successfully reverted chaos on target: {name: %s, namespace: %v, container: %v}", t.Name, t.Namespace, t.TargetContainer) if err = result.AnnotateChaosResult(resultDetails.Name, chaosDetails.ChaosNamespace, "reverted", "pod", t.Name); err != nil { errList = append(errList, err.Error()) } } if len(errList) != 0 { - return fmt.Errorf("err: %v", strings.Join(errList, ", ")) + return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s]", strings.Join(errList, ","))} } } @@ -241,7 +250,7 @@ func terminateProcess(t targetDetails) error { if strings.Contains(err.Error(), ProcessAlreadyKilled) || strings.Contains(err.Error(), ProcessAlreadyFinished) { return nil } - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: fmt.Sprintf("failed to revert chaos: %s", err.Error())} } log.Infof("successfully reverted chaos on target: {name: %s, namespace: %v, container: %v}", t.Name, t.Namespace, t.TargetContainer) return nil @@ -316,27 +325,27 @@ func prepareStressor(experimentDetails *experimentTypes.ExperimentDetails) []str } 
//pidPath will get the pid path of the container -func pidPath(pid int) cgroups.Path { - processPath := "/proc/" + strconv.Itoa(pid) + "/cgroup" - paths, err := parseCgroupFile(processPath) +func pidPath(t targetDetails) cgroups.Path { + processPath := "/proc/" + strconv.Itoa(t.Pid) + "/cgroup" + paths, err := parseCgroupFile(processPath, t) if err != nil { return getErrorPath(errors.Wrapf(err, "parse cgroup file %s", processPath)) } - return getExistingPath(paths, pid, "") + return getExistingPath(paths, t.Pid, "") } //parseCgroupFile will read and verify the cgroup file entry of a container -func parseCgroupFile(path string) (map[string]string, error) { +func parseCgroupFile(path string, t targetDetails) (map[string]string, error) { file, err := os.Open(path) if err != nil { - return nil, errors.Errorf("unable to parse cgroup file: %v", err) + return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: fmt.Sprintf("fail to parse cgroup: %s", err.Error())} } defer file.Close() - return parseCgroupFromReader(file) + return parseCgroupFromReader(file, t) } //parseCgroupFromReader will parse the cgroup file from the reader -func parseCgroupFromReader(r io.Reader) (map[string]string, error) { +func parseCgroupFromReader(r io.Reader, t targetDetails) (map[string]string, error) { var ( cgroups = make(map[string]string) s = bufio.NewScanner(r) @@ -347,7 +356,7 @@ func parseCgroupFromReader(r io.Reader) (map[string]string, error) { parts = strings.SplitN(text, ":", 3) ) if len(parts) < 3 { - return nil, errors.Errorf("invalid cgroup entry: %q", text) + return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: fmt.Sprintf("invalid cgroup entry: %q", text)} } for _, subs := range strings.Split(parts[1], ",") { if subs != "" { @@ -356,7 +365,7 @@ func parseCgroupFromReader(r io.Reader) (map[string]string, error) { } } if err := s.Err(); err != nil { - return nil, errors.Errorf("buffer scanner failed: %v", err) + return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: fmt.Sprintf("buffer scanner failed: %s", err.Error())} } return cgroups, nil @@ -423,18 +432,18 @@ func getCgroupDestination(pid int, subsystem string) (string, error) { } //findValidCgroup will be used to get a valid cgroup path -func findValidCgroup(path cgroups.Path, target string) (string, error) { +func findValidCgroup(path cgroups.Path, t targetDetails) (string, error) { for _, subsystem := range cgroupSubsystemList { path, err := path(cgroups.Name(subsystem)) if err != nil { - log.Errorf("fail to retrieve the cgroup path, subsystem: %v, target: %v, err: %v", subsystem, target, err) + log.Errorf("fail to retrieve the cgroup path, subsystem: %v, target: %v, err: %v", subsystem, t.ContainerId, err) continue } - if strings.Contains(path, target) { + if strings.Contains(path, t.ContainerId) { return path, nil } } - return "", errors.Errorf("never found valid cgroup for %s", target) + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: "could not find valid cgroup"} } //getENV fetches all 
the env variables from the runner pod @@ -485,27 +494,27 @@ func abortWatcher(targets []targetDetails, resultName, chaosNS string) { } // getCGroupManager will return the cgroup for the given pid of the process -func getCGroupManager(pid int, containerID string) (interface{}, error) { +func getCGroupManager(t targetDetails) (interface{}, error) { if cgroups.Mode() == cgroups.Unified { - groupPath, err := cgroupsv2.PidGroupPath(pid) + groupPath, err := cgroupsv2.PidGroupPath(t.Pid) if err != nil { - return nil, errors.Errorf("Error in getting groupPath, %v", err) + return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: fmt.Sprintf("fail to get pid group path: %s", err.Error())} } cgroup2, err := cgroupsv2.LoadManager("/sys/fs/cgroup", groupPath) if err != nil { - return nil, errors.Errorf("Error loading cgroup v2 manager, %v", err) + return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: fmt.Sprintf("fail to load the cgroup: %s", err.Error())} } return cgroup2, nil } - path := pidPath(pid) - cgroup, err := findValidCgroup(path, containerID) + path := pidPath(t) + cgroup, err := findValidCgroup(path, t) if err != nil { - return nil, errors.Errorf("fail to get cgroup, err: %v", err) + return nil, stacktrace.Propagate(err, "could not find valid cgroup") } cgroup1, err := cgroups.Load(cgroups.V1, cgroups.StaticPath(cgroup)) if err != nil { - return nil, errors.Errorf("fail to load the cgroup, err: %v", err) + return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: fmt.Sprintf("fail to load the cgroup: %s", err.Error())} } return cgroup1, nil @@ -533,15 +542,15 @@ func injectChaos(t targetDetails, stressors string) (*exec.Cmd, error) { cmd.Stdout = &buf err = cmd.Start() if err != nil { - return nil, errors.Errorf("fail to start the stress process %v, err: %v", stressCommand, err) + return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: fmt.Sprintf("failed to start stress process: %s", err.Error())} } // add the stress process to the cgroup of target container if err = addProcessToCgroup(cmd.Process.Pid, t.CGroupManager); err != nil { if killErr := cmd.Process.Kill(); killErr != nil { - return nil, errors.Errorf("stressors failed killing %v process, err: %v", cmd.Process.Pid, killErr) + return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: fmt.Sprintf("fail to add the stress process to cgroup %s and kill stress process: %s", err.Error(), killErr.Error())} } - return nil, errors.Errorf("fail to add the stress process into target container cgroup, err: %v", err) + return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: fmt.Sprintf("fail to add the stress process to cgroup: %s", err.Error())} } log.Info("[Info]: Sending signal to resume the stress 
process") @@ -551,7 +560,7 @@ func injectChaos(t targetDetails, stressors string) (*exec.Cmd, error) { // remove pause and resume or start the stress process if err := cmd.Process.Signal(syscall.SIGCONT); err != nil { - return nil, errors.Errorf("fail to remove pause and start the stress process: %v", err) + return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: fmt.Sprintf("fail to remove pause and start the stress process: %s", err.Error())} } return cmd, nil } @@ -564,4 +573,5 @@ type targetDetails struct { Pid int CGroupManager interface{} Cmd *exec.Cmd + Source string } diff --git a/chaoslib/litmus/stress-chaos/lib/stress-chaos.go b/chaoslib/litmus/stress-chaos/lib/stress-chaos.go index a5e76d3a8..258460546 100644 --- a/chaoslib/litmus/stress-chaos/lib/stress-chaos.go +++ b/chaoslib/litmus/stress-chaos/lib/stress-chaos.go @@ -3,6 +3,8 @@ package lib import ( "context" "fmt" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/palantir/stacktrace" "strconv" "strings" @@ -13,7 +15,6 @@ import ( "github.com/litmuschaos/litmus-go/pkg/status" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" - "github.com/pkg/errors" "github.com/sirupsen/logrus" apiv1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -23,7 +24,7 @@ import ( func PrepareAndInjectStressChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { var err error - //Setup the tunables if provided in range + //Set up the tunables if provided in range SetChaosTunables(experimentsDetails) switch experimentsDetails.StressType { @@ -56,11 +57,11 @@ func PrepareAndInjectStressChaos(experimentsDetails *experimentTypes.ExperimentD // Get the target pod details for the chaos execution // if the target pod is not defined it will derive the random target pod list using pod affected percentage if experimentsDetails.TargetPods == "" && chaosDetails.AppDetail == nil { - return errors.Errorf("Please provide one of the appLabel or TARGET_PODS") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "provide one of the appLabel or TARGET_PODS"} } targetPodList, err := common.GetTargetPods(experimentsDetails.NodeLabel, experimentsDetails.TargetPods, experimentsDetails.PodsAffectedPerc, clients, chaosDetails) if err != nil { - return err + return stacktrace.Propagate(err, "could not get target pods") } //Waiting for the ramp time before chaos injection @@ -73,28 +74,28 @@ func PrepareAndInjectStressChaos(experimentsDetails *experimentTypes.ExperimentD if experimentsDetails.ChaosServiceAccount == "" { experimentsDetails.ChaosServiceAccount, err = common.GetServiceAccount(experimentsDetails.ChaosNamespace, experimentsDetails.ChaosPodName, clients) if err != nil { - return errors.Errorf("unable to get the serviceAccountName, err: %v", err) + return stacktrace.Propagate(err, "could not experiment service account") } } if experimentsDetails.EngineName != "" { if err := common.SetHelperData(chaosDetails, experimentsDetails.SetHelperData, clients); err != nil { - return err + return stacktrace.Propagate(err, "could not set helper data") } } - experimentsDetails.IsTargetContainerProvided = (experimentsDetails.TargetContainer != "") + experimentsDetails.IsTargetContainerProvided = 
experimentsDetails.TargetContainer != "" switch strings.ToLower(experimentsDetails.Sequence) { case "serial": if err = injectChaosInSerialMode(experimentsDetails, targetPodList, clients, chaosDetails, resultDetails, eventsDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": if err = injectChaosInParallelMode(experimentsDetails, targetPodList, clients, chaosDetails, resultDetails, eventsDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.Sequence) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)} } return nil @@ -103,7 +104,6 @@ func PrepareAndInjectStressChaos(experimentsDetails *experimentTypes.ExperimentD // injectChaosInSerialMode inject the stress chaos in all target application serially (one by one) func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { - var err error // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { @@ -116,10 +116,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai //Get the target container name of the application pod if !experimentsDetails.IsTargetContainerProvided { - experimentsDetails.TargetContainer, err = common.GetTargetContainer(pod.Namespace, pod.Name, clients) - if err != nil { - return errors.Errorf("unable to get the target container name, err: %v", err) - } + experimentsDetails.TargetContainer = pod.Spec.Containers[0].Name } log.InfoWithValues("[Info]: Details of application under chaos injection", logrus.Fields{ @@ -129,7 +126,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai }) runID := common.GetRunID() if err := createHelperPod(experimentsDetails, clients, chaosDetails, fmt.Sprintf("%s:%s:%s", pod.Name, pod.Namespace, experimentsDetails.TargetContainer), pod.Spec.NodeName, runID); err != nil { - return errors.Errorf("unable to create the helper pod, err: %v", err) + return stacktrace.Propagate(err, "could not create helper pod") } appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID) @@ -138,7 +135,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai log.Info("[Status]: Checking the status of the helper pods") if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return errors.Errorf("helper pods are not in running state, err: %v", err) + return stacktrace.Propagate(err, "could not check helper status") } // Wait till the completion of the helper pod @@ -147,17 +144,16 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, experimentsDetails.ExperimentName) if err != nil || podStatus == "Failed" { 
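// Each layer in this patch wraps failures with stacktrace.Propagate so the
// failstep shows the call path while the innermost (often typed) error stays
// recoverable via stacktrace.RootCause. A small self-contained illustration;
// the function names are placeholders, not the experiment's real call chain.
package main

import (
	"errors"
	"fmt"

	"github.com/palantir/stacktrace"
)

func waitForHelper() error {
	return errors.New("helper pod is not in running state")
}

func runChaos() error {
	if err := waitForHelper(); err != nil {
		return stacktrace.Propagate(err, "could not check helper status")
	}
	return nil
}

func main() {
	err := runChaos()
	fmt.Println(err)                       // wrapped message plus the underlying cause
	fmt.Println(stacktrace.RootCause(err)) // just the innermost error
}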
common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return common.HelperFailedError(err) + return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, true) } //Deleting all the helper pod for stress chaos log.Info("[Cleanup]: Deleting the helper pod") err = common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients) if err != nil { - return errors.Errorf("unable to delete the helper pods, err: %v", err) + return stacktrace.Propagate(err, "could not delete helper pod(s)") } } - return nil } @@ -182,7 +178,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet } if err := createHelperPod(experimentsDetails, clients, chaosDetails, strings.Join(targetsPerNode, ";"), node, runID); err != nil { - return errors.Errorf("unable to create the helper pod, err: %v", err) + return stacktrace.Propagate(err, "could not create helper pod") } } @@ -192,7 +188,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet log.Info("[Status]: Checking the status of the helper pods") if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return errors.Errorf("helper pods are not in running state, err: %v", err) + return stacktrace.Propagate(err, "could not check helper status") } // Wait till the completion of the helper pod @@ -201,14 +197,14 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, experimentsDetails.ExperimentName) if err != nil || podStatus == "Failed" { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return common.HelperFailedError(err) + return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, true) } //Deleting all the helper pod for stress chaos log.Info("[Cleanup]: Deleting all the helper pod") err = common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients) if err != nil { - return errors.Errorf("unable to delete the helper pods, err: %v", err) + return stacktrace.Propagate(err, "could not delete helper pod(s)") } return nil @@ -293,8 +289,10 @@ func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clie } _, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Create(context.Background(), helperPod, v1.CreateOptions{}) - return err - + if err != nil { + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("unable to create helper pod: %s", err.Error())} + } + return nil } // getPodEnv derive all the env required for the helper pod @@ -327,8 +325,8 @@ func ptrint64(p int64) *int64 { return &p } -//SetChaosTunables will setup a random value within a given range of values -//If the value is not provided in range it'll setup the initial provided value. +//SetChaosTunables will set up a random value within a given range of values +//If the value is not provided in range it'll set up the initial provided value. 
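// SetChaosTunables below relies on common.ValidateRange to turn values such as
// "1-3" into a single concrete value. That helper's implementation is not part
// of this diff; the sketch only illustrates the documented contract (a
// "min-max" string yields a random value in the range, anything else is kept as provided).
package main

import (
	"fmt"
	"math/rand"
	"strconv"
	"strings"
)

func pickFromRange(value string) string {
	parts := strings.Split(value, "-")
	if len(parts) != 2 {
		return value // not a range: keep the provided value
	}
	lower, err1 := strconv.Atoi(parts[0])
	upper, err2 := strconv.Atoi(parts[1])
	if err1 != nil || err2 != nil || upper < lower {
		return value
	}
	return strconv.Itoa(lower + rand.Intn(upper-lower+1))
}

func main() {
	fmt.Println(pickFromRange("1-3")) // e.g. "2"
	fmt.Println(pickFromRange("4"))   // "4"
}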
func SetChaosTunables(experimentsDetails *experimentTypes.ExperimentDetails) { experimentsDetails.CPUcores = common.ValidateRange(experimentsDetails.CPUcores) experimentsDetails.CPULoad = common.ValidateRange(experimentsDetails.CPULoad) diff --git a/chaoslib/litmus/vm-poweroff/lib/vm-poweroff.go b/chaoslib/litmus/vm-poweroff/lib/vm-poweroff.go index 3edb7d28c..e30557880 100644 --- a/chaoslib/litmus/vm-poweroff/lib/vm-poweroff.go +++ b/chaoslib/litmus/vm-poweroff/lib/vm-poweroff.go @@ -1,12 +1,14 @@ package lib import ( + "fmt" "os" "os/signal" "strings" "syscall" "time" + "github.com/litmuschaos/litmus-go/pkg/cerrors" clients "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/cloud/vmware" "github.com/litmuschaos/litmus-go/pkg/events" @@ -15,7 +17,7 @@ import ( "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" experimentTypes "github.com/litmuschaos/litmus-go/pkg/vmware/vm-poweroff/types" - "github.com/pkg/errors" + "github.com/palantir/stacktrace" ) var inject, abort chan os.Signal @@ -48,14 +50,14 @@ func InjectVMPowerOffChaos(experimentsDetails *experimentTypes.ExperimentDetails switch strings.ToLower(experimentsDetails.Sequence) { case "serial": if err := injectChaosInSerialMode(experimentsDetails, vmIdList, cookie, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": if err := injectChaosInParallelMode(experimentsDetails, vmIdList, cookie, clients, resultDetails, eventsDetails, chaosDetails); err != nil { - return err + return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.Sequence) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)} } //Waiting for the ramp time after chaos injection @@ -93,7 +95,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai //Stopping the VM log.Infof("[Chaos]: Stopping %s VM", vmId) if err := vmware.StopVM(experimentsDetails.VcenterServer, vmId, cookie); err != nil { - return errors.Errorf("failed to stop %s vm: %s", vmId, err.Error()) + return stacktrace.Propagate(err, fmt.Sprintf("failed to stop %s vm", vmId)) } common.SetTargets(vmId, "injected", "VM", chaosDetails) @@ -101,14 +103,14 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai //Wait for the VM to completely stop log.Infof("[Wait]: Wait for VM '%s' to get in POWERED_OFF state", vmId) if err := vmware.WaitForVMStop(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.VcenterServer, vmId, cookie); err != nil { - return errors.Errorf("vm %s failed to successfully shutdown, err: %s", vmId, err.Error()) + return stacktrace.Propagate(err, "VM shutdown failed") } //Run the probes during the chaos //The OnChaos probes execution will start in the first iteration and keep running for the entire chaos duration if len(resultDetails.ProbeDetails) != 0 && i == 0 { if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { - return err + return stacktrace.Propagate(err, "failed to run probes") } } @@ -119,13 +121,13 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai //Starting the VM log.Infof("[Chaos]: Starting back %s VM", vmId) if err := 
vmware.StartVM(experimentsDetails.VcenterServer, vmId, cookie); err != nil { - return errors.Errorf("failed to start back %s vm: %s", vmId, err.Error()) + return stacktrace.Propagate(err, fmt.Sprintf("failed to start back %s vm", vmId)) } //Wait for the VM to completely start log.Infof("[Wait]: Wait for VM '%s' to get in POWERED_ON state", vmId) if err := vmware.WaitForVMStart(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.VcenterServer, vmId, cookie); err != nil { - return errors.Errorf("vm %s failed to successfully start, err: %s", vmId, err.Error()) + return stacktrace.Propagate(err, "vm failed to start") } common.SetTargets(vmId, "reverted", "VM", chaosDetails) @@ -165,7 +167,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet //Stopping the VM log.Infof("[Chaos]: Stopping %s VM", vmId) if err := vmware.StopVM(experimentsDetails.VcenterServer, vmId, cookie); err != nil { - return errors.Errorf("failed to stop %s vm: %s", vmId, err.Error()) + return stacktrace.Propagate(err, fmt.Sprintf("failed to stop %s vm", vmId)) } common.SetTargets(vmId, "injected", "VM", chaosDetails) @@ -176,14 +178,14 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet //Wait for the VM to completely stop log.Infof("[Wait]: Wait for VM '%s' to get in POWERED_OFF state", vmId) if err := vmware.WaitForVMStop(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.VcenterServer, vmId, cookie); err != nil { - return errors.Errorf("vm %s failed to successfully shutdown, err: %s", vmId, err.Error()) + return stacktrace.Propagate(err, "vm failed to shut down") } } //Running the probes during chaos if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { - return err + return stacktrace.Propagate(err, "failed to run probes") } } @@ -196,7 +198,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet //Starting the VM log.Infof("[Chaos]: Starting back %s VM", vmId) if err := vmware.StartVM(experimentsDetails.VcenterServer, vmId, cookie); err != nil { - return errors.Errorf("failed to start back %s vm: %s", vmId, err.Error()) + return stacktrace.Propagate(err, fmt.Sprintf("failed to start back %s vm", vmId)) } } @@ -205,7 +207,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet //Wait for the VM to completely start log.Infof("[Wait]: Wait for VM '%s' to get in POWERED_ON state", vmId) if err := vmware.WaitForVMStart(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.VcenterServer, vmId, cookie); err != nil { - return errors.Errorf("vm %s failed to successfully start, err: %s", vmId, err.Error()) + return stacktrace.Propagate(err, "vm failed to start") } } diff --git a/chaoslib/powerfulseal/pod-delete/lib/pod-delete.go b/chaoslib/powerfulseal/pod-delete/lib/pod-delete.go deleted file mode 100644 index d00908fb6..000000000 --- a/chaoslib/powerfulseal/pod-delete/lib/pod-delete.go +++ /dev/null @@ -1,272 +0,0 @@ -package lib - -import ( - "context" - "strconv" - "time" - - clients "github.com/litmuschaos/litmus-go/pkg/clients" - "github.com/litmuschaos/litmus-go/pkg/events" - experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-delete/types" - "github.com/litmuschaos/litmus-go/pkg/log" - "github.com/litmuschaos/litmus-go/pkg/status" - "github.com/litmuschaos/litmus-go/pkg/types" - "github.com/litmuschaos/litmus-go/pkg/utils/common" - 
"github.com/litmuschaos/litmus-go/pkg/utils/retry" - "github.com/pkg/errors" - appsv1 "k8s.io/api/apps/v1" - apiv1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -//PreparePodDelete contains the prepration steps before chaos injection -func PreparePodDelete(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { - - //Waiting for the ramp time before chaos injection - if experimentsDetails.RampTime != 0 { - log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime) - common.WaitForDuration(experimentsDetails.RampTime) - } - - if experimentsDetails.ChaosServiceAccount == "" { - // Getting the serviceAccountName for the powerfulseal pod - err := GetServiceAccount(experimentsDetails, clients) - if err != nil { - return errors.Errorf("Unable to get the serviceAccountName, err: %v", err) - } - } - - // generating a unique string which can be appended with the powerfulseal deployment name & labels for the uniquely identification - runID := common.GetRunID() - - // generating the chaos inject event in the chaosengine - if experimentsDetails.EngineName != "" { - msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on application pod" - types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails) - events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine") - } - - // Creating configmap for powerfulseal deployment - err := CreateConfigMap(experimentsDetails, clients, runID) - if err != nil { - return err - } - - // Creating powerfulseal deployment - err = CreatePowerfulsealDeployment(experimentsDetails, clients, runID) - if err != nil { - return errors.Errorf("Unable to create the helper pod, err: %v", err) - } - - //checking the status of the powerfulseal pod, wait till the powerfulseal pod comes to running state else fail the experiment - log.Info("[Status]: Checking the status of the helper pod") - err = status.CheckApplicationStatusesByLabels(experimentsDetails.ChaosNamespace, "name=powerfulseal-"+runID, experimentsDetails.Timeout, experimentsDetails.Delay, clients) - if err != nil { - return errors.Errorf("powerfulseal pod is not in running state, err: %v", err) - } - - // Wait for Chaos Duration - log.Infof("[Wait]: Waiting for the %vs chaos duration", experimentsDetails.ChaosDuration) - common.WaitForDuration(experimentsDetails.ChaosDuration) - - //Deleting the powerfulseal deployment - log.Info("[Cleanup]: Deleting the powerfulseal deployment") - err = DeletePowerfulsealDeployment(experimentsDetails, clients, runID) - if err != nil { - return errors.Errorf("Unable to delete the powerfulseal deployment, err: %v", err) - } - - //Deleting the powerfulseal configmap - log.Info("[Cleanup]: Deleting the powerfulseal configmap") - err = DeletePowerfulsealConfigmap(experimentsDetails, clients, runID) - if err != nil { - return errors.Errorf("Unable to delete the powerfulseal configmap, err: %v", err) - } - - //Waiting for the ramp time after chaos injection - if experimentsDetails.RampTime != 0 { - log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime) - common.WaitForDuration(experimentsDetails.RampTime) - } - return nil -} - -// GetServiceAccount find the serviceAccountName for the powerfulseal deployment -func GetServiceAccount(experimentsDetails *experimentTypes.ExperimentDetails, 
clients clients.ClientSets) error { - pod, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Get(context.Background(), experimentsDetails.ChaosPodName, v1.GetOptions{}) - if err != nil { - return err - } - experimentsDetails.ChaosServiceAccount = pod.Spec.ServiceAccountName - return nil -} - -// CreateConfigMap creates a configmap for the powerfulseal deployment -func CreateConfigMap(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, runID string) error { - - data := map[string]string{} - - // It will store all the details inside a string in well formated way - policy := GetConfigMapData(experimentsDetails) - - data["policy"] = policy - configMap := &apiv1.ConfigMap{ - ObjectMeta: v1.ObjectMeta{ - Name: "policy-" + runID, - Namespace: experimentsDetails.ChaosNamespace, - Labels: map[string]string{ - "name": "policy-" + runID, - }, - }, - Data: data, - } - - _, err := clients.KubeClient.CoreV1().ConfigMaps(experimentsDetails.ChaosNamespace).Create(context.Background(), configMap, v1.CreateOptions{}) - - return err -} - -// GetConfigMapData generates the configmap data for the powerfulseal deployments in desired format format -func GetConfigMapData(experimentsDetails *experimentTypes.ExperimentDetails) string { - - waitTime, _ := strconv.Atoi(experimentsDetails.ChaosInterval) - policy := "config:" + "\n" + - " minSecondsBetweenRuns: 1" + "\n" + - " maxSecondsBetweenRuns: " + strconv.Itoa(waitTime) + "\n" + - "podScenarios:" + "\n" + - " - name: \"delete random pods in application namespace\"" + "\n" + - " match:" + "\n" + - " - labels:" + "\n" + - " namespace: " + experimentsDetails.AppNS + "\n" + - " selector: " + experimentsDetails.AppLabel + "\n" + - " filters:" + "\n" + - " - randomSample:" + "\n" + - " size: 1" + "\n" + - " actions:" + "\n" + - " - kill:" + "\n" + - " probability: 0.77" + "\n" + - " force: " + strconv.FormatBool(experimentsDetails.Force) - - return policy - -} - -// CreatePowerfulsealDeployment derive the attributes for powerfulseal deployment and create it -func CreatePowerfulsealDeployment(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, runID string) error { - - deployment := &appsv1.Deployment{ - ObjectMeta: v1.ObjectMeta{ - Name: "powerfulseal-" + runID, - Namespace: experimentsDetails.ChaosNamespace, - Labels: map[string]string{ - "app": "powerfulseal", - "name": "powerfulseal-" + runID, - "chaosUID": string(experimentsDetails.ChaosUID), - "app.kubernetes.io/part-of": "litmus", - }, - }, - Spec: appsv1.DeploymentSpec{ - Selector: &v1.LabelSelector{ - MatchLabels: map[string]string{ - "name": "powerfulseal-" + runID, - "chaosUID": string(experimentsDetails.ChaosUID), - }, - }, - Replicas: func(i int32) *int32 { return &i }(1), - Template: apiv1.PodTemplateSpec{ - ObjectMeta: v1.ObjectMeta{ - Labels: map[string]string{ - "name": "powerfulseal-" + runID, - "chaosUID": string(experimentsDetails.ChaosUID), - }, - }, - Spec: apiv1.PodSpec{ - Volumes: []apiv1.Volume{ - { - Name: "policyfile", - VolumeSource: apiv1.VolumeSource{ - ConfigMap: &apiv1.ConfigMapVolumeSource{ - LocalObjectReference: apiv1.LocalObjectReference{ - Name: "policy-" + runID, - }, - }, - }, - }, - }, - ServiceAccountName: experimentsDetails.ChaosServiceAccount, - TerminationGracePeriodSeconds: func(i int64) *int64 { return &i }(0), - Containers: []apiv1.Container{ - { - Name: "powerfulseal", - Image: "ksatchit/miko-powerfulseal:non-ssh", - Args: []string{ - "autonomous", - "--inventory-kubernetes", - 
"--no-cloud", - "--policy-file=/root/policy_kill_random_default.yml", - "--use-pod-delete-instead-of-ssh-kill", - }, - VolumeMounts: []apiv1.VolumeMount{ - { - Name: "policyfile", - MountPath: "/root/policy_kill_random_default.yml", - SubPath: "policy", - }, - }, - }, - }, - }, - }, - }, - } - - _, err := clients.KubeClient.AppsV1().Deployments(experimentsDetails.ChaosNamespace).Create(context.Background(), deployment, v1.CreateOptions{}) - return err - -} - -//DeletePowerfulsealDeployment delete the powerfulseal deployment -func DeletePowerfulsealDeployment(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, runID string) error { - - err := clients.KubeClient.AppsV1().Deployments(experimentsDetails.ChaosNamespace).Delete(context.Background(), "powerfulseal-"+runID, v1.DeleteOptions{}) - - if err != nil { - return err - } - - err = retry. - Times(90). - Wait(1 * time.Second). - Try(func(attempt uint) error { - podSpec, err := clients.KubeClient.AppsV1().Deployments(experimentsDetails.ChaosNamespace).List(context.Background(), v1.ListOptions{LabelSelector: "name=powerfulseal-" + runID}) - if err != nil || len(podSpec.Items) != 0 { - return errors.Errorf("Deployment is not deleted yet, err: %v", err) - } - return nil - }) - - return err -} - -//DeletePowerfulsealConfigmap delete the powerfulseal configmap -func DeletePowerfulsealConfigmap(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, runID string) error { - - err := clients.KubeClient.CoreV1().ConfigMaps(experimentsDetails.ChaosNamespace).Delete(context.Background(), "policy-"+runID, v1.DeleteOptions{}) - - if err != nil { - return err - } - - err = retry. - Times(90). - Wait(1 * time.Second). - Try(func(attempt uint) error { - podSpec, err := clients.KubeClient.CoreV1().ConfigMaps(experimentsDetails.ChaosNamespace).List(context.Background(), v1.ListOptions{LabelSelector: "name=policy-" + runID}) - if err != nil || len(podSpec.Items) != 0 { - return errors.Errorf("configmap is not deleted yet, err: %v", err) - } - return nil - }) - - return err -} diff --git a/chaoslib/pumba/container-kill/lib/container-kill.go b/chaoslib/pumba/container-kill/lib/container-kill.go deleted file mode 100644 index 9f43cc278..000000000 --- a/chaoslib/pumba/container-kill/lib/container-kill.go +++ /dev/null @@ -1,362 +0,0 @@ -package lib - -import ( - "context" - "fmt" - "strconv" - "strings" - "time" - - litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/container-kill/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" - "github.com/litmuschaos/litmus-go/pkg/events" - experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/container-kill/types" - "github.com/litmuschaos/litmus-go/pkg/log" - "github.com/litmuschaos/litmus-go/pkg/probe" - "github.com/litmuschaos/litmus-go/pkg/status" - "github.com/litmuschaos/litmus-go/pkg/types" - "github.com/litmuschaos/litmus-go/pkg/utils/common" - "github.com/litmuschaos/litmus-go/pkg/utils/retry" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - apiv1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -//PrepareContainerKill contains the prepration steps before chaos injection -func PrepareContainerKill(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { - - // Get the target pod details for the chaos execution - // if the target pod is not defined it will derive 
the random target pod list using pod affected percentage - if experimentsDetails.TargetPods == "" && chaosDetails.AppDetail == nil { - return errors.Errorf("please provide one of the appLabel or TARGET_PODS") - } - //Setup the tunables if provided in range - litmusLIB.SetChaosTunables(experimentsDetails) - - log.InfoWithValues("[Info]: The tunables are:", logrus.Fields{ - "PodsAffectedPerc": experimentsDetails.PodsAffectedPerc, - "Sequence": experimentsDetails.Sequence, - }) - podsAffectedPerc, _ := strconv.Atoi(experimentsDetails.PodsAffectedPerc) - targetPodList, err := common.GetPodList(experimentsDetails.TargetPods, podsAffectedPerc, clients, chaosDetails) - if err != nil { - return err - } - - podNames := []string{} - for _, pod := range targetPodList.Items { - podNames = append(podNames, pod.Name) - } - log.Infof("Target pods list for chaos, %v", podNames) - - //Waiting for the ramp time before chaos injection - if experimentsDetails.RampTime != 0 { - log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime) - common.WaitForDuration(experimentsDetails.RampTime) - } - - if experimentsDetails.EngineName != "" { - if err := common.SetHelperData(chaosDetails, experimentsDetails.SetHelperData, clients); err != nil { - return err - } - } - - if experimentsDetails.EngineName != "" { - msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on target pod" - types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails) - events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine") - } - - experimentsDetails.IsTargetContainerProvided = (experimentsDetails.TargetContainer != "") - switch strings.ToLower(experimentsDetails.Sequence) { - case "serial": - if err = injectChaosInSerialMode(experimentsDetails, targetPodList, clients, chaosDetails, resultDetails, eventsDetails); err != nil { - return err - } - case "parallel": - if err = injectChaosInParallelMode(experimentsDetails, targetPodList, clients, chaosDetails, resultDetails, eventsDetails); err != nil { - return err - } - default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.Sequence) - } - - //Waiting for the ramp time after chaos injection - if experimentsDetails.RampTime != 0 { - log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime) - common.WaitForDuration(experimentsDetails.RampTime) - } - return nil -} - -// injectChaosInSerialMode kill the container of all target application serially (one by one) -func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { - - var err error - // run the probes during chaos - if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { - return err - } - } - - // creating the helper pod to perform container kill chaos - for _, pod := range targetPodList.Items { - - //GetRestartCount return the restart count of target container - restartCountBefore := getRestartCount(pod, experimentsDetails.TargetContainer) - log.Infof("restartCount of target container before chaos injection: %v", restartCountBefore) - - runID := common.GetRunID() - - //Get the target container name of the application pod - if !experimentsDetails.IsTargetContainerProvided { - 
experimentsDetails.TargetContainer, err = common.GetTargetContainer(pod.Namespace, pod.Name, clients) - if err != nil { - return errors.Errorf("unable to get the target container name, err: %v", err) - } - } - - log.InfoWithValues("[Info]: Details of application under chaos injection", logrus.Fields{ - "Target Pod": pod.Name, - "NodeName": pod.Spec.NodeName, - "Target Container": experimentsDetails.TargetContainer, - }) - - if err := createHelperPod(experimentsDetails, clients, chaosDetails, pod.Name, pod.Spec.NodeName, runID); err != nil { - return errors.Errorf("unable to create the helper pod, err: %v", err) - } - - common.SetTargets(pod.Name, "targeted", "pod", chaosDetails) - - appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID) - - //checking the status of the helper pod, wait till the pod comes to running state else fail the experiment - log.Info("[Status]: Checking the status of the helper pod") - if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { - common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return errors.Errorf("helper pod is not in running state, err: %v", err) - } - - log.Infof("[Wait]: Waiting for the %vs chaos duration", experimentsDetails.ChaosDuration) - common.WaitForDuration(experimentsDetails.ChaosDuration) - - // It will verify that the restart count of container should increase after chaos injection - if err := verifyRestartCount(experimentsDetails, pod, clients, restartCountBefore); err != nil { - common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return errors.Errorf("target container is not restarted, err: %v", err) - } - - //Deleting the helper pod - log.Info("[Cleanup]: Deleting the helper pod") - if err := common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return errors.Errorf("unable to delete the helper pod, err: %v", err) - } - } - - return nil -} - -// injectChaosInParallelMode kill the container of all target application in parallel mode (all at once) -func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { - - var err error - //GetRestartCount return the restart count of target container - restartCountBefore := getRestartCountAll(targetPodList, experimentsDetails.TargetContainer) - log.Infof("restartCount of target containers before chaos injection: %v", restartCountBefore) - - // run the probes during chaos - if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { - return err - } - } - - runID := common.GetRunID() - - // creating the helper pod to perform container kill chaos - for _, pod := range targetPodList.Items { - - //Get the target container name of the application pod - if !experimentsDetails.IsTargetContainerProvided { - experimentsDetails.TargetContainer, err = common.GetTargetContainer(pod.Namespace, pod.Name, clients) - if err != nil { - return errors.Errorf("unable to get the target container name, err: %v", err) - } - } - - log.InfoWithValues("[Info]: Details of application under chaos injection", logrus.Fields{ - "Target Pod": pod.Name, - "NodeName": 
pod.Spec.NodeName, - "Target Container": experimentsDetails.TargetContainer, - }) - - if err := createHelperPod(experimentsDetails, clients, chaosDetails, pod.Name, pod.Spec.NodeName, runID); err != nil { - return errors.Errorf("unable to create the helper pod, err: %v", err) - } - common.SetTargets(pod.Name, "targeted", "pod", chaosDetails) - } - - appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID) - - //checking the status of the helper pod, wait till the pod comes to running state else fail the experiment - log.Info("[Status]: Checking the status of the helper pod") - if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { - common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return errors.Errorf("helper pod is not in running state, err: %v", err) - } - - log.Infof("[Wait]: Waiting for the %vs chaos duration", experimentsDetails.ChaosDuration) - common.WaitForDuration(experimentsDetails.ChaosDuration) - - // It will verify that the restart count of container should increase after chaos injection - if err := verifyRestartCountAll(experimentsDetails, targetPodList, clients, restartCountBefore); err != nil { - common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return errors.Errorf("target container is not restarted , err: %v", err) - } - - //Deleting the helper pod - log.Info("[Cleanup]: Deleting the helper pod") - if err := common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return errors.Errorf("unable to delete the helper pod, err: %v", err) - } - - return nil -} - -//getRestartCount return the restart count of target container -func getRestartCount(targetPod apiv1.Pod, containerName string) int { - restartCount := 0 - for _, container := range targetPod.Status.ContainerStatuses { - if container.Name == containerName { - restartCount = int(container.RestartCount) - break - } - } - return restartCount -} - -//getRestartCountAll return the restart count of all target container -func getRestartCountAll(targetPodList apiv1.PodList, containerName string) []int { - restartCount := []int{} - for _, pod := range targetPodList.Items { - restartCount = append(restartCount, getRestartCount(pod, containerName)) - } - - return restartCount -} - -//verifyRestartCount verify the restart count of target container that it is restarted or not after chaos injection -// the restart count of container should increase after chaos injection -func verifyRestartCount(experimentsDetails *experimentTypes.ExperimentDetails, pod apiv1.Pod, clients clients.ClientSets, restartCountBefore int) error { - - restartCountAfter := 0 - err := retry. - Times(90). - Wait(1 * time.Second). 
- Try(func(attempt uint) error { - pod, err := clients.KubeClient.CoreV1().Pods(pod.Namespace).Get(context.Background(), pod.Name, v1.GetOptions{}) - if err != nil { - return err - } - for _, container := range pod.Status.ContainerStatuses { - if container.Name == experimentsDetails.TargetContainer { - restartCountAfter = int(container.RestartCount) - break - } - } - return nil - }) - - if err != nil { - return err - } - - // it will fail if restart count won't increase - if restartCountAfter <= restartCountBefore { - return errors.Errorf("target container is not restarted") - } - - log.Infof("restartCount of target container after chaos injection: %v", restartCountAfter) - - return nil -} - -//verifyRestartCountAll verify the restart count of all the target container that it is restarted or not after chaos injection -// the restart count of container should increase after chaos injection -func verifyRestartCountAll(experimentsDetails *experimentTypes.ExperimentDetails, podList apiv1.PodList, clients clients.ClientSets, restartCountBefore []int) error { - - for index, pod := range podList.Items { - - if err := verifyRestartCount(experimentsDetails, pod, clients, restartCountBefore[index]); err != nil { - return err - } - } - return nil -} - -// createHelperPod derive the attributes for helper pod and create the helper pod -func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, appName, appNodeName, runID string) error { - - helperPod := &apiv1.Pod{ - ObjectMeta: v1.ObjectMeta{ - GenerateName: experimentsDetails.ExperimentName + "-helper-", - Namespace: experimentsDetails.ChaosNamespace, - Labels: common.GetHelperLabels(chaosDetails.Labels, runID, experimentsDetails.ExperimentName), - Annotations: chaosDetails.Annotations, - }, - Spec: apiv1.PodSpec{ - RestartPolicy: apiv1.RestartPolicyNever, - ImagePullSecrets: chaosDetails.ImagePullSecrets, - NodeName: appNodeName, - Volumes: []apiv1.Volume{ - { - Name: "dockersocket", - VolumeSource: apiv1.VolumeSource{ - HostPath: &apiv1.HostPathVolumeSource{ - Path: experimentsDetails.SocketPath, - }, - }, - }, - }, - Containers: []apiv1.Container{ - { - Name: experimentsDetails.ExperimentName, - Image: experimentsDetails.LIBImage, - ImagePullPolicy: apiv1.PullPolicy(experimentsDetails.LIBImagePullPolicy), - Command: []string{ - "sudo", - "-E", - }, - Args: []string{ - "pumba", - "--random", - "--interval", - strconv.Itoa(experimentsDetails.ChaosInterval) + "s", - "kill", - "--signal", - experimentsDetails.Signal, - "re2:k8s_" + experimentsDetails.TargetContainer + "_" + appName, - }, - Env: []apiv1.EnvVar{ - { - Name: "DOCKER_HOST", - Value: "unix://" + experimentsDetails.SocketPath, - }, - }, - Resources: chaosDetails.Resources, - VolumeMounts: []apiv1.VolumeMount{ - { - Name: "dockersocket", - MountPath: experimentsDetails.SocketPath, - }, - }, - }, - }, - }, - } - - _, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Create(context.Background(), helperPod, v1.CreateOptions{}) - return err -} diff --git a/chaoslib/pumba/cpu-chaos/lib/cpu-chaos.go b/chaoslib/pumba/cpu-chaos/lib/cpu-chaos.go deleted file mode 100644 index 114ecdeae..000000000 --- a/chaoslib/pumba/cpu-chaos/lib/cpu-chaos.go +++ /dev/null @@ -1,273 +0,0 @@ -package lib - -import ( - "context" - "fmt" - "strconv" - "strings" - - litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/stress-chaos/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" - 
"github.com/litmuschaos/litmus-go/pkg/events" - experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/stress-chaos/types" - "github.com/litmuschaos/litmus-go/pkg/log" - "github.com/litmuschaos/litmus-go/pkg/probe" - "github.com/litmuschaos/litmus-go/pkg/status" - "github.com/litmuschaos/litmus-go/pkg/types" - "github.com/litmuschaos/litmus-go/pkg/utils/common" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - apiv1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// PreparePodCPUHog contains prepration steps before chaos injection -func PreparePodCPUHog(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { - - //setup the tunables if provided in range - litmusLIB.SetChaosTunables(experimentsDetails) - - // Get the target pod details for the chaos execution - // if the target pod is not defined it will derive the random target pod list using pod affected percentage - if experimentsDetails.TargetPods == "" && chaosDetails.AppDetail == nil { - return errors.Errorf("please provide one of the appLabel or TARGET_PODS") - } - podsAffectedPerc, _ := strconv.Atoi(experimentsDetails.PodsAffectedPerc) - targetPodList, err := common.GetPodList(experimentsDetails.TargetPods, podsAffectedPerc, clients, chaosDetails) - if err != nil { - return err - } - - podNames := []string{} - for _, pod := range targetPodList.Items { - podNames = append(podNames, pod.Name) - } - log.Infof("Target pods list for chaos, %v", podNames) - - //Waiting for the ramp time before chaos injection - if experimentsDetails.RampTime != 0 { - log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime) - common.WaitForDuration(experimentsDetails.RampTime) - } - - if experimentsDetails.EngineName != "" { - msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on target pod" - types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails) - events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine") - } - - if experimentsDetails.EngineName != "" { - if err := common.SetHelperData(chaosDetails, experimentsDetails.SetHelperData, clients); err != nil { - return err - } - } - switch strings.ToLower(experimentsDetails.Sequence) { - case "serial": - if err = injectChaosInSerialMode(experimentsDetails, targetPodList, clients, chaosDetails, resultDetails, eventsDetails); err != nil { - return err - } - case "parallel": - if err = injectChaosInParallelMode(experimentsDetails, targetPodList, clients, chaosDetails, resultDetails, eventsDetails); err != nil { - return err - } - default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.Sequence) - } - - //Waiting for the ramp time after chaos injection - if experimentsDetails.RampTime != 0 { - log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime) - common.WaitForDuration(experimentsDetails.RampTime) - } - return nil -} - -// injectChaosInSerialMode stress the cpu of all target application serially (one by one) -func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { - - // run the probes during chaos - if len(resultDetails.ProbeDetails) != 0 { - if err := 
probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { - return err - } - } - - // creating the helper pod to perform cpu chaos - for _, pod := range targetPodList.Items { - - runID := common.GetRunID() - - log.InfoWithValues("[Info]: Details of application under chaos injection", logrus.Fields{ - "Target Pod": pod.Name, - "NodeName": pod.Spec.NodeName, - "CPUcores": experimentsDetails.CPUcores, - }) - - if err := createHelperPod(experimentsDetails, clients, chaosDetails, pod.Name, pod.Spec.NodeName, runID); err != nil { - return errors.Errorf("unable to create the helper pod, err: %v", err) - } - - appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID) - - //checking the status of the helper pod, wait till the pod comes to running state else fail the experiment - log.Info("[Status]: Checking the status of the helper pod") - if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { - common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return errors.Errorf("helper pod is not in running state, err: %v", err) - } - common.SetTargets(pod.Name, "targeted", "pod", chaosDetails) - - // Wait till the completion of helper pod - log.Info("[Wait]: Waiting till the completion of the helper pod") - podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, "pumba-stress") - if err != nil || podStatus == "Failed" { - common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return common.HelperFailedError(err) - } - - //Deleting the helper pod - log.Info("[Cleanup]: Deleting the helper pod") - if err := common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return errors.Errorf("unable to delete the helper pod, err: %v", err) - } - } - - return nil -} - -// injectChaosInParallelMode kill the container of all target application in parallel mode (all at once) -func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { - - // run the probes during chaos - if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { - return err - } - } - - runID := common.GetRunID() - - // creating the helper pod to perform cpu chaos - for _, pod := range targetPodList.Items { - - log.InfoWithValues("[Info]: Details of application under chaos injection", logrus.Fields{ - "Target Pod": pod.Name, - "NodeName": pod.Spec.NodeName, - "CPUcores": experimentsDetails.CPUcores, - }) - - if err := createHelperPod(experimentsDetails, clients, chaosDetails, pod.Name, pod.Spec.NodeName, runID); err != nil { - return errors.Errorf("unable to create the helper pod, err: %v", err) - } - } - - appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID) - - //checking the status of the helper pod, wait till the pod comes to running state else fail the experiment - log.Info("[Status]: Checking the status of the helper pod") - if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, 
experimentsDetails.Delay, clients); err != nil { - common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return errors.Errorf("helper pod is not in running state, err: %v", err) - } - for _, pod := range targetPodList.Items { - common.SetTargets(pod.Name, "targeted", "pod", chaosDetails) - } - - // Wait till the completion of helper pod - log.Info("[Wait]: Waiting till the completion of the helper pod") - podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, "pumba-stress") - if err != nil || podStatus == "Failed" { - common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return common.HelperFailedError(err) - } - - //Deleting the helper pod - log.Info("[Cleanup]: Deleting the helper pod") - if err = common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return errors.Errorf("unable to delete the helper pod, err: %v", err) - } - - return nil -} - -// createHelperPod derive the attributes for helper pod and create the helper pod -func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, appName, appNodeName, runID string) error { - - helperPod := &apiv1.Pod{ - ObjectMeta: v1.ObjectMeta{ - GenerateName: experimentsDetails.ExperimentName + "-helper-", - Namespace: experimentsDetails.ChaosNamespace, - Labels: common.GetHelperLabels(chaosDetails.Labels, runID, experimentsDetails.ExperimentName), - Annotations: chaosDetails.Annotations, - }, - Spec: apiv1.PodSpec{ - RestartPolicy: apiv1.RestartPolicyNever, - ImagePullSecrets: chaosDetails.ImagePullSecrets, - NodeName: appNodeName, - Volumes: []apiv1.Volume{ - { - Name: "dockersocket", - VolumeSource: apiv1.VolumeSource{ - HostPath: &apiv1.HostPathVolumeSource{ - Path: experimentsDetails.SocketPath, - }, - }, - }, - }, - Containers: []apiv1.Container{ - { - Name: "pumba-stress", - Image: experimentsDetails.LIBImage, - Command: []string{ - "sudo", - "-E", - }, - Args: getContainerArguments(experimentsDetails, appName), - Env: []apiv1.EnvVar{ - { - Name: "DOCKER_HOST", - Value: "unix://" + experimentsDetails.SocketPath, - }, - }, - Resources: chaosDetails.Resources, - VolumeMounts: []apiv1.VolumeMount{ - { - Name: "dockersocket", - MountPath: experimentsDetails.SocketPath, - }, - }, - ImagePullPolicy: apiv1.PullPolicy(experimentsDetails.LIBImagePullPolicy), - SecurityContext: &apiv1.SecurityContext{ - Capabilities: &apiv1.Capabilities{ - Add: []apiv1.Capability{ - "SYS_ADMIN", - }, - }, - }, - }, - }, - }, - } - - _, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Create(context.Background(), helperPod, v1.CreateOptions{}) - return err -} - -// getContainerArguments derives the args for the pumba stress helper pod -func getContainerArguments(experimentsDetails *experimentTypes.ExperimentDetails, appName string) []string { - stressArgs := []string{ - "pumba", - "--log-level", - "debug", - "--label", - "io.kubernetes.pod.name=" + appName, - "stress", - "--duration", - strconv.Itoa(experimentsDetails.ChaosDuration) + "s", - "--stress-image", - experimentsDetails.StressImage, - "--stressors", - "--cpu " + experimentsDetails.CPUcores + " --timeout " + strconv.Itoa(experimentsDetails.ChaosDuration) + "s", - } - return stressArgs -} diff --git a/chaoslib/pumba/memory-chaos/lib/memory-chaos.go 
b/chaoslib/pumba/memory-chaos/lib/memory-chaos.go deleted file mode 100644 index fbea66104..000000000 --- a/chaoslib/pumba/memory-chaos/lib/memory-chaos.go +++ /dev/null @@ -1,274 +0,0 @@ -package lib - -import ( - "context" - "fmt" - "strconv" - "strings" - - litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/stress-chaos/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" - "github.com/litmuschaos/litmus-go/pkg/events" - experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/stress-chaos/types" - "github.com/litmuschaos/litmus-go/pkg/log" - "github.com/litmuschaos/litmus-go/pkg/probe" - "github.com/litmuschaos/litmus-go/pkg/status" - "github.com/litmuschaos/litmus-go/pkg/types" - "github.com/litmuschaos/litmus-go/pkg/utils/common" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - apiv1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// PreparePodMemoryHog contains prepration steps before chaos injection -func PreparePodMemoryHog(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { - - //setup the tunables if provided in range - litmusLIB.SetChaosTunables(experimentsDetails) - - // Get the target pod details for the chaos execution - // if the target pod is not defined it will derive the random target pod list using pod affected percentage - if experimentsDetails.TargetPods == "" && chaosDetails.AppDetail == nil { - return errors.Errorf("please provide one of the appLabel or TARGET_PODS") - } - podsAffectedPerc, _ := strconv.Atoi(experimentsDetails.PodsAffectedPerc) - targetPodList, err := common.GetPodList(experimentsDetails.TargetPods, podsAffectedPerc, clients, chaosDetails) - if err != nil { - return err - } - - podNames := []string{} - for _, pod := range targetPodList.Items { - podNames = append(podNames, pod.Name) - } - log.Infof("Target pods list for chaos, %v", podNames) - - //Waiting for the ramp time before chaos injection - if experimentsDetails.RampTime != 0 { - log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime) - common.WaitForDuration(experimentsDetails.RampTime) - } - - if experimentsDetails.EngineName != "" { - msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on target pod" - types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails) - events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine") - } - - if experimentsDetails.EngineName != "" { - if err := common.SetHelperData(chaosDetails, experimentsDetails.SetHelperData, clients); err != nil { - return err - } - } - - switch strings.ToLower(experimentsDetails.Sequence) { - case "serial": - if err = injectChaosInSerialMode(experimentsDetails, targetPodList, clients, chaosDetails, resultDetails, eventsDetails); err != nil { - return err - } - case "parallel": - if err = injectChaosInParallelMode(experimentsDetails, targetPodList, clients, chaosDetails, resultDetails, eventsDetails); err != nil { - return err - } - default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.Sequence) - } - - //Waiting for the ramp time after chaos injection - if experimentsDetails.RampTime != 0 { - log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime) - common.WaitForDuration(experimentsDetails.RampTime) - } - return nil -} - -// injectChaosInSerialMode 
stress the cpu of all target application serially (one by one) -func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { - - // run the probes during chaos - if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { - return err - } - } - - // creating the helper pod to perform memory chaos - for _, pod := range targetPodList.Items { - - runID := common.GetRunID() - - log.InfoWithValues("[Info]: Details of application under chaos injection", logrus.Fields{ - "Target Pod": pod.Name, - "NodeName": pod.Spec.NodeName, - "MemoryBytes": experimentsDetails.MemoryConsumption, - }) - - if err := createHelperPod(experimentsDetails, clients, chaosDetails, pod.Name, pod.Spec.NodeName, runID); err != nil { - return errors.Errorf("unable to create the helper pod, err: %v", err) - } - - appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID) - - //checking the status of the helper pod, wait till the pod comes to running state else fail the experiment - log.Info("[Status]: Checking the status of the helper pod") - if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { - common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return errors.Errorf("helper pod is not in running state, err: %v", err) - } - common.SetTargets(pod.Name, "targeted", "pod", chaosDetails) - - // Wait till the completion of helper pod - log.Info("[Wait]: Waiting till the completion of the helper pod") - podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, "pumba-stress") - if err != nil || podStatus == "Failed" { - common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return common.HelperFailedError(err) - } - - //Deleting the helper pod - log.Info("[Cleanup]: Deleting the helper pod") - if err := common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return errors.Errorf("unable to delete the helper pod, err: %v", err) - } - } - - return nil -} - -// injectChaosInParallelMode kill the container of all target application in parallel mode (all at once) -func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { - - // run the probes during chaos - if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { - return err - } - } - - runID := common.GetRunID() - - // creating the helper pod to perform memory chaos - for _, pod := range targetPodList.Items { - - log.InfoWithValues("[Info]: Details of application under chaos injection", logrus.Fields{ - "Target Pod": pod.Name, - "NodeName": pod.Spec.NodeName, - "MemoryBytes": experimentsDetails.MemoryConsumption, - }) - - if err := createHelperPod(experimentsDetails, clients, chaosDetails, pod.Name, pod.Spec.NodeName, runID); err != nil { - return 
errors.Errorf("unable to create the helper pod, err: %v", err) - } - } - - appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID) - - //checking the status of the helper pod, wait till the pod comes to running state else fail the experiment - log.Info("[Status]: Checking the status of the helper pod") - if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { - common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return errors.Errorf("helper pod is not in running state, err: %v", err) - } - for _, pod := range targetPodList.Items { - common.SetTargets(pod.Name, "targeted", "pod", chaosDetails) - } - - // Wait till the completion of helper pod - log.Info("[Wait]: Waiting till the completion of the helper pod") - podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, "pumba-stress") - if err != nil || podStatus == "Failed" { - common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return common.HelperFailedError(err) - } - - //Deleting the helper pod - log.Info("[Cleanup]: Deleting the helper pod") - if err := common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return errors.Errorf("unable to delete the helper pod, err: %v", err) - } - - return nil -} - -// createHelperPod derive the attributes for helper pod and create the helper pod -func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, appName, appNodeName, runID string) error { - - helperPod := &apiv1.Pod{ - ObjectMeta: v1.ObjectMeta{ - GenerateName: experimentsDetails.ExperimentName + "-helper-", - Namespace: experimentsDetails.ChaosNamespace, - Labels: common.GetHelperLabels(chaosDetails.Labels, runID, experimentsDetails.ExperimentName), - Annotations: chaosDetails.Annotations, - }, - Spec: apiv1.PodSpec{ - RestartPolicy: apiv1.RestartPolicyNever, - ImagePullSecrets: chaosDetails.ImagePullSecrets, - NodeName: appNodeName, - Volumes: []apiv1.Volume{ - { - Name: "dockersocket", - VolumeSource: apiv1.VolumeSource{ - HostPath: &apiv1.HostPathVolumeSource{ - Path: experimentsDetails.SocketPath, - }, - }, - }, - }, - Containers: []apiv1.Container{ - { - Name: "pumba-stress", - Image: experimentsDetails.LIBImage, - Command: []string{ - "sudo", - "-E", - }, - Args: getContainerArguments(experimentsDetails, appName), - Env: []apiv1.EnvVar{ - { - Name: "DOCKER_HOST", - Value: "unix://" + experimentsDetails.SocketPath, - }, - }, - Resources: chaosDetails.Resources, - VolumeMounts: []apiv1.VolumeMount{ - { - Name: "dockersocket", - MountPath: experimentsDetails.SocketPath, - }, - }, - ImagePullPolicy: apiv1.PullPolicy(experimentsDetails.LIBImagePullPolicy), - SecurityContext: &apiv1.SecurityContext{ - Capabilities: &apiv1.Capabilities{ - Add: []apiv1.Capability{ - "SYS_ADMIN", - }, - }, - }, - }, - }, - }, - } - - _, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Create(context.Background(), helperPod, v1.CreateOptions{}) - return err -} - -// getContainerArguments derives the args for the pumba stress helper pod -func getContainerArguments(experimentsDetails *experimentTypes.ExperimentDetails, appName string) []string { - stressArgs := []string{ - "pumba", - 
"--log-level", - "debug", - "--label", - "io.kubernetes.pod.name=" + appName, - "stress", - "--duration", - strconv.Itoa(experimentsDetails.ChaosDuration) + "s", - "--stress-image", - experimentsDetails.StressImage, - "--stressors", - "--cpu 1 --vm 1 --vm-bytes " + experimentsDetails.MemoryConsumption + "M --timeout " + strconv.Itoa(experimentsDetails.ChaosDuration) + "s", - } - return stressArgs -} diff --git a/chaoslib/pumba/network-chaos/lib/corruption/corruption.go b/chaoslib/pumba/network-chaos/lib/corruption/corruption.go deleted file mode 100644 index b7daf9883..000000000 --- a/chaoslib/pumba/network-chaos/lib/corruption/corruption.go +++ /dev/null @@ -1,43 +0,0 @@ -package corruption - -import ( - "strconv" - - network_chaos "github.com/litmuschaos/litmus-go/chaoslib/pumba/network-chaos/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" - experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/types" - "github.com/litmuschaos/litmus-go/pkg/types" -) - -//PodNetworkCorruptionChaos contains the steps to prepare and inject chaos -func PodNetworkCorruptionChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { - - args, err := getContainerArguments(experimentsDetails) - if err != nil { - return err - } - return network_chaos.PrepareAndInjectChaos(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails, args) -} - -// getContainerArguments derives the args for the pumba pod -func getContainerArguments(experimentsDetails *experimentTypes.ExperimentDetails) ([]string, error) { - baseArgs := []string{ - "pumba", - "netem", - "--tc-image", - experimentsDetails.TCImage, - "--interface", - experimentsDetails.NetworkInterface, - "--duration", - strconv.Itoa(experimentsDetails.ChaosDuration) + "s", - } - - args := baseArgs - args, err := network_chaos.AddTargetIpsArgs(experimentsDetails.DestinationIPs, experimentsDetails.DestinationHosts, args) - if err != nil { - return args, err - } - args = append(args, "corrupt", "--percent", experimentsDetails.NetworkPacketCorruptionPercentage) - - return args, nil -} diff --git a/chaoslib/pumba/network-chaos/lib/duplication/duplication.go b/chaoslib/pumba/network-chaos/lib/duplication/duplication.go deleted file mode 100644 index ccb9b4960..000000000 --- a/chaoslib/pumba/network-chaos/lib/duplication/duplication.go +++ /dev/null @@ -1,43 +0,0 @@ -package duplication - -import ( - "strconv" - - network_chaos "github.com/litmuschaos/litmus-go/chaoslib/pumba/network-chaos/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" - experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/types" - "github.com/litmuschaos/litmus-go/pkg/types" -) - -//PodNetworkDuplicationChaos contains the steps to prepare and inject chaos -func PodNetworkDuplicationChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { - - args, err := getContainerArguments(experimentsDetails) - if err != nil { - return err - } - return network_chaos.PrepareAndInjectChaos(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails, args) -} - -// getContainerArguments derives the args for the pumba pod -func getContainerArguments(experimentsDetails *experimentTypes.ExperimentDetails) ([]string, error) { - baseArgs := 
[]string{ - "pumba", - "netem", - "--tc-image", - experimentsDetails.TCImage, - "--interface", - experimentsDetails.NetworkInterface, - "--duration", - strconv.Itoa(experimentsDetails.ChaosDuration) + "s", - } - - args := baseArgs - args, err := network_chaos.AddTargetIpsArgs(experimentsDetails.DestinationIPs, experimentsDetails.DestinationHosts, args) - if err != nil { - return args, err - } - args = append(args, "duplicate", "--percent", experimentsDetails.NetworkPacketDuplicationPercentage) - - return args, nil -} diff --git a/chaoslib/pumba/network-chaos/lib/latency/latency.go b/chaoslib/pumba/network-chaos/lib/latency/latency.go deleted file mode 100644 index 2f7b7b69a..000000000 --- a/chaoslib/pumba/network-chaos/lib/latency/latency.go +++ /dev/null @@ -1,43 +0,0 @@ -package latency - -import ( - "strconv" - - network_chaos "github.com/litmuschaos/litmus-go/chaoslib/pumba/network-chaos/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" - experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/types" - "github.com/litmuschaos/litmus-go/pkg/types" -) - -//PodNetworkLatencyChaos contains the steps to prepare and inject chaos -func PodNetworkLatencyChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { - - args, err := getContainerArguments(experimentsDetails) - if err != nil { - return err - } - return network_chaos.PrepareAndInjectChaos(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails, args) -} - -// getContainerArguments derives the args for the pumba pod -func getContainerArguments(experimentsDetails *experimentTypes.ExperimentDetails) ([]string, error) { - baseArgs := []string{ - "pumba", - "netem", - "--tc-image", - experimentsDetails.TCImage, - "--interface", - experimentsDetails.NetworkInterface, - "--duration", - strconv.Itoa(experimentsDetails.ChaosDuration) + "s", - } - - args := baseArgs - args, err := network_chaos.AddTargetIpsArgs(experimentsDetails.DestinationIPs, experimentsDetails.DestinationHosts, args) - if err != nil { - return args, err - } - args = append(args, "delay", "--time", strconv.Itoa(experimentsDetails.NetworkLatency)) - - return args, nil -} diff --git a/chaoslib/pumba/network-chaos/lib/loss/loss.go b/chaoslib/pumba/network-chaos/lib/loss/loss.go deleted file mode 100644 index 2879b3739..000000000 --- a/chaoslib/pumba/network-chaos/lib/loss/loss.go +++ /dev/null @@ -1,43 +0,0 @@ -package loss - -import ( - "strconv" - - network_chaos "github.com/litmuschaos/litmus-go/chaoslib/pumba/network-chaos/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" - experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/types" - "github.com/litmuschaos/litmus-go/pkg/types" -) - -//PodNetworkLossChaos contains the steps to prepare and inject chaos -func PodNetworkLossChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { - - args, err := getContainerArguments(experimentsDetails) - if err != nil { - return err - } - return network_chaos.PrepareAndInjectChaos(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails, args) -} - -// getContainerArguments derives the args for the pumba pod -func getContainerArguments(experimentsDetails *experimentTypes.ExperimentDetails) ([]string, error) { - 
baseArgs := []string{ - "pumba", - "netem", - "--tc-image", - experimentsDetails.TCImage, - "--interface", - experimentsDetails.NetworkInterface, - "--duration", - strconv.Itoa(experimentsDetails.ChaosDuration) + "s", - } - - args := baseArgs - args, err := network_chaos.AddTargetIpsArgs(experimentsDetails.DestinationIPs, experimentsDetails.DestinationHosts, args) - if err != nil { - return args, err - } - args = append(args, "loss", "--percent", experimentsDetails.NetworkPacketLossPercentage) - - return args, nil -} diff --git a/chaoslib/pumba/network-chaos/lib/network-chaos.go b/chaoslib/pumba/network-chaos/lib/network-chaos.go deleted file mode 100644 index 26858ccd1..000000000 --- a/chaoslib/pumba/network-chaos/lib/network-chaos.go +++ /dev/null @@ -1,302 +0,0 @@ -package lib - -import ( - "context" - "fmt" - "strconv" - "strings" - - litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/network-chaos/lib" - network_chaos "github.com/litmuschaos/litmus-go/chaoslib/litmus/network-chaos/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" - "github.com/litmuschaos/litmus-go/pkg/events" - experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/types" - "github.com/litmuschaos/litmus-go/pkg/log" - "github.com/litmuschaos/litmus-go/pkg/probe" - "github.com/litmuschaos/litmus-go/pkg/status" - "github.com/litmuschaos/litmus-go/pkg/types" - "github.com/litmuschaos/litmus-go/pkg/utils/common" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - apiv1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -//PrepareAndInjectChaos contains the prepration and chaos injection steps -func PrepareAndInjectChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails, args []string) error { - - // Get the target pod details for the chaos execution - // if the target pod is not defined it will derive the random target pod list using pod affected percentage - if experimentsDetails.TargetPods == "" && chaosDetails.AppDetail == nil { - return errors.Errorf("please provide one of the appLabel or TARGET_PODS") - } - //setup the tunables if provided in range - litmusLIB.SetChaosTunables(experimentsDetails) - - switch experimentsDetails.NetworkChaosType { - case "network-loss": - log.InfoWithValues("[Info]: The chaos tunables are:", logrus.Fields{ - "NetworkPacketLossPercentage": experimentsDetails.NetworkPacketLossPercentage, - "Sequence": experimentsDetails.Sequence, - "PodsAffectedPerc": experimentsDetails.PodsAffectedPerc, - }) - case "network-latency": - log.InfoWithValues("[Info]: The chaos tunables are:", logrus.Fields{ - "NetworkLatency": strconv.Itoa(experimentsDetails.NetworkLatency), - "Sequence": experimentsDetails.Sequence, - "PodsAffectedPerc": experimentsDetails.PodsAffectedPerc, - }) - case "network-corruption": - log.InfoWithValues("[Info]: The chaos tunables are:", logrus.Fields{ - "NetworkPacketCorruptionPercentage": experimentsDetails.NetworkPacketCorruptionPercentage, - "Sequence": experimentsDetails.Sequence, - "PodsAffectedPerc": experimentsDetails.PodsAffectedPerc, - }) - case "network-duplication": - log.InfoWithValues("[Info]: The chaos tunables are:", logrus.Fields{ - "NetworkPacketDuplicationPercentage": experimentsDetails.NetworkPacketDuplicationPercentage, - "Sequence": experimentsDetails.Sequence, - "PodsAffectedPerc": experimentsDetails.PodsAffectedPerc, - }) - default: - return 
errors.Errorf("invalid experiment, please check the environment.go") - - } - podsAffectedPerc, _ := strconv.Atoi(experimentsDetails.PodsAffectedPerc) - targetPodList, err := common.GetPodList(experimentsDetails.TargetPods, podsAffectedPerc, clients, chaosDetails) - if err != nil { - return err - } - - podNames := []string{} - for _, pod := range targetPodList.Items { - podNames = append(podNames, pod.Name) - } - log.Infof("Target pods list for chaos, %v", podNames) - - //Waiting for the ramp time before chaos injection - if experimentsDetails.RampTime != 0 { - log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime) - common.WaitForDuration(experimentsDetails.RampTime) - } - - if experimentsDetails.EngineName != "" { - if err := common.SetHelperData(chaosDetails, experimentsDetails.SetHelperData, clients); err != nil { - return err - } - } - - if experimentsDetails.EngineName != "" { - msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on target pod" - types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails) - events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine") - } - - switch strings.ToLower(experimentsDetails.Sequence) { - case "serial": - if err = injectChaosInSerialMode(experimentsDetails, targetPodList, clients, chaosDetails, args, resultDetails, eventsDetails); err != nil { - return err - } - case "parallel": - if err = injectChaosInParallelMode(experimentsDetails, targetPodList, clients, chaosDetails, args, resultDetails, eventsDetails); err != nil { - return err - } - default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.Sequence) - } - - //Waiting for the ramp time after chaos injection - if experimentsDetails.RampTime != 0 { - log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime) - common.WaitForDuration(experimentsDetails.RampTime) - } - return nil -} - -// injectChaosInSerialMode stress the cpu of all target application serially (one by one) -func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, args []string, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { - - // run the probes during chaos - if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { - return err - } - } - - // creating the helper pod to perform network chaos - for _, pod := range targetPodList.Items { - - runID := common.GetRunID() - - log.InfoWithValues("[Info]: Details of application under chaos injection", logrus.Fields{ - "Target Pod": pod.Name, - "NodeName": pod.Spec.NodeName, - }) - // args contains details of the specific chaos injection - // constructing `argsWithRegex` based on updated regex with a diff pod name - // without extending/concatenating the args var itself - argsWithRegex := append(args, "re2:k8s_POD_"+pod.Name+"_"+pod.Namespace) - log.Infof("Arguments for running %v are %v", experimentsDetails.ExperimentName, argsWithRegex) - if err := createHelperPod(experimentsDetails, clients, chaosDetails, pod.Spec.NodeName, runID, argsWithRegex); err != nil { - return errors.Errorf("unable to create the helper pod, err: %v", err) - } - - appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID) - - //checking the status of the helper pod, 
wait till the pod comes to running state else fail the experiment - log.Info("[Status]: Checking the status of the helper pod") - if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { - common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return errors.Errorf("helper pod is not in running state, err: %v", err) - } - common.SetTargets(pod.Name, "targeted", "pod", chaosDetails) - - // Wait till the completion of helper pod - log.Info("[Wait]: Waiting till the completion of the helper pod") - podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, chaosDetails.ExperimentName) - if err != nil || podStatus == "Failed" { - common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return common.HelperFailedError(err) - } - - //Deleting the helper pod - log.Info("[Cleanup]: Deleting the helper pod") - if err := common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return errors.Errorf("unable to delete the helper pod, err: %v", err) - } - } - - return nil -} - -// injectChaosInParallelMode kill the container of all target application in parallel mode (all at once) -func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, args []string, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { - - // run the probes during chaos - if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { - return err - } - } - - runID := common.GetRunID() - - // creating the helper pod to perform network chaos - for _, pod := range targetPodList.Items { - - log.InfoWithValues("[Info]: Details of application under chaos injection", logrus.Fields{ - "Target Pod": pod.Name, - "NodeName": pod.Spec.NodeName, - }) - // args contains details of the specific chaos injection - // constructing `argsWithRegex` based on updated regex with a diff pod name - // without extending/concatenating the args var itself - argsWithRegex := append(args, "re2:k8s_POD_"+pod.Name+"_"+pod.Namespace) - log.Infof("Arguments for running %v are %v", experimentsDetails.ExperimentName, argsWithRegex) - if err := createHelperPod(experimentsDetails, clients, chaosDetails, pod.Spec.NodeName, runID, argsWithRegex); err != nil { - return errors.Errorf("unable to create the helper pod, err: %v", err) - } - } - - appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID) - - //checking the status of the helper pod, wait till the pod comes to running state else fail the experiment - log.Info("[Status]: Checking the status of the helper pod") - if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { - common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return errors.Errorf("helper pod is not in running state, err: %v", err) - } - for _, pod := range targetPodList.Items { - common.SetTargets(pod.Name, "targeted", "pod", chaosDetails) - } - - // Wait till the completion of helper pod - log.Info("[Wait]: Waiting till the completion of the 
helper pod") - podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, chaosDetails.ExperimentName) - if err != nil || podStatus == "Failed" { - common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return common.HelperFailedError(err) - } - - //Deleting the helper pod - log.Info("[Cleanup]: Deleting the helper pod") - if err := common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return errors.Errorf("unable to delete the helper pod, err: %v", err) - } - - return nil -} - -// createHelperPod derive the attributes for helper pod and create the helper pod -func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, appNodeName, runID string, args []string) error { - - helperPod := &apiv1.Pod{ - ObjectMeta: v1.ObjectMeta{ - GenerateName: experimentsDetails.ExperimentName + "-helper-", - Namespace: experimentsDetails.ChaosNamespace, - Labels: common.GetHelperLabels(chaosDetails.Labels, runID, experimentsDetails.ExperimentName), - Annotations: chaosDetails.Annotations, - }, - Spec: apiv1.PodSpec{ - RestartPolicy: apiv1.RestartPolicyNever, - ImagePullSecrets: chaosDetails.ImagePullSecrets, - NodeName: appNodeName, - Volumes: []apiv1.Volume{ - { - Name: "dockersocket", - VolumeSource: apiv1.VolumeSource{ - HostPath: &apiv1.HostPathVolumeSource{ - Path: experimentsDetails.SocketPath, - }, - }, - }, - }, - Containers: []apiv1.Container{ - { - Name: experimentsDetails.ExperimentName, - Image: experimentsDetails.LIBImage, - ImagePullPolicy: apiv1.PullPolicy(experimentsDetails.LIBImagePullPolicy), - Command: []string{ - "sudo", - "-E", - }, - Args: args, - Env: []apiv1.EnvVar{ - { - Name: "DOCKER_HOST", - Value: "unix://" + experimentsDetails.SocketPath, - }, - }, - Resources: chaosDetails.Resources, - VolumeMounts: []apiv1.VolumeMount{ - { - Name: "dockersocket", - MountPath: experimentsDetails.SocketPath, - }, - }, - }, - }, - }, - } - - _, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Create(context.Background(), helperPod, v1.CreateOptions{}) - return err -} - -// AddTargetIpsArgs inserts a comma-separated list of targetIPs (if provided by the user) into the pumba command/args -func AddTargetIpsArgs(targetIPs, targetHosts string, args []string) ([]string, error) { - - targetIPs, err := network_chaos.GetTargetIps(targetIPs, targetHosts, clients.ClientSets{}, false) - if err != nil { - return nil, err - } - - if targetIPs == "" { - return args, nil - } - ips := strings.Split(targetIPs, ",") - for i := range ips { - args = append(args, "--target", strings.TrimSpace(ips[i])) - } - return args, nil -} diff --git a/chaoslib/pumba/pod-io-stress/lib/pod-io-stress.go b/chaoslib/pumba/pod-io-stress/lib/pod-io-stress.go deleted file mode 100644 index dcdc75fe1..000000000 --- a/chaoslib/pumba/pod-io-stress/lib/pod-io-stress.go +++ /dev/null @@ -1,298 +0,0 @@ -package lib - -import ( - "context" - "fmt" - "strconv" - "strings" - - litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/stress-chaos/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" - "github.com/litmuschaos/litmus-go/pkg/events" - experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/stress-chaos/types" - "github.com/litmuschaos/litmus-go/pkg/log" - "github.com/litmuschaos/litmus-go/pkg/probe" - 
"github.com/litmuschaos/litmus-go/pkg/status" - "github.com/litmuschaos/litmus-go/pkg/types" - "github.com/litmuschaos/litmus-go/pkg/utils/common" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - apiv1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// PreparePodIOStress contains prepration steps before chaos injection -func PreparePodIOStress(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { - - //setup the tunables if provided in range - litmusLIB.SetChaosTunables(experimentsDetails) - - // Get the target pod details for the chaos execution - // if the target pod is not defined it will derive the random target pod list using pod affected percentage - if experimentsDetails.TargetPods == "" && chaosDetails.AppDetail == nil { - return errors.Errorf("please provide one of the appLabel or TARGET_PODS") - } - podsAffectedPerc, _ := strconv.Atoi(experimentsDetails.PodsAffectedPerc) - targetPodList, err := common.GetPodList(experimentsDetails.TargetPods, podsAffectedPerc, clients, chaosDetails) - if err != nil { - return err - } - - podNames := []string{} - for _, pod := range targetPodList.Items { - podNames = append(podNames, pod.Name) - } - log.Infof("Target pods list for chaos, %v", podNames) - - //Waiting for the ramp time before chaos injection - if experimentsDetails.RampTime != 0 { - log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime) - common.WaitForDuration(experimentsDetails.RampTime) - } - - if experimentsDetails.EngineName != "" { - msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on target pod" - types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails) - events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine") - } - - if experimentsDetails.EngineName != "" { - if err := common.SetHelperData(chaosDetails, experimentsDetails.SetHelperData, clients); err != nil { - return err - } - } - - switch strings.ToLower(experimentsDetails.Sequence) { - case "serial": - if err = injectChaosInSerialMode(experimentsDetails, targetPodList, clients, chaosDetails, resultDetails, eventsDetails); err != nil { - return err - } - case "parallel": - if err = injectChaosInParallelMode(experimentsDetails, targetPodList, clients, chaosDetails, resultDetails, eventsDetails); err != nil { - return err - } - default: - return errors.Errorf("%v sequence is not supported", experimentsDetails.Sequence) - } - - //Waiting for the ramp time after chaos injection - if experimentsDetails.RampTime != 0 { - log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime) - common.WaitForDuration(experimentsDetails.RampTime) - } - return nil -} - -// injectChaosInSerialMode stress the cpu of all target application serially (one by one) -func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { - - // run the probes during chaos - if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { - return err - } - } - - // creating the helper pod to perform network chaos - for _, pod := range targetPodList.Items { 
- - runID := common.GetRunID() - - log.InfoWithValues("[Info]: Details of application under chaos injection", logrus.Fields{ - "Target Pod": pod.Name, - "NodeName": pod.Spec.NodeName, - "FilesystemUtilizationPercentage": experimentsDetails.FilesystemUtilizationPercentage, - "FilesystemUtilizationBytes": experimentsDetails.FilesystemUtilizationBytes, - }) - - if err := createHelperPod(experimentsDetails, clients, chaosDetails, pod.Name, pod.Spec.NodeName, runID); err != nil { - return errors.Errorf("unable to create the helper pod, err: %v", err) - } - - appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID) - - //checking the status of the helper pod, wait till the pod comes to running state else fail the experiment - log.Info("[Status]: Checking the status of the helper pod") - if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { - common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return errors.Errorf("helper pod is not in running state, err: %v", err) - } - common.SetTargets(pod.Name, "targeted", "pod", chaosDetails) - - // Wait till the completion of helper pod - log.Info("[Wait]: Waiting till the completion of the helper pod") - podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, "pumba-stress") - if err != nil || podStatus == "Failed" { - common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return common.HelperFailedError(err) - } - - //Deleting the helper pod - log.Info("[Cleanup]: Deleting the helper pod") - if err := common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return errors.Errorf("unable to delete the helper pod, err: %v", err) - } - } - - return nil -} - -// injectChaosInParallelMode kill the container of all target application in parallel mode (all at once) -func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { - // run the probes during chaos - if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { - return err - } - } - - runID := common.GetRunID() - - // creating the helper pod to perform network chaos - for _, pod := range targetPodList.Items { - - log.InfoWithValues("[Info]: Details of application under chaos injection", logrus.Fields{ - "Target Pod": pod.Name, - "NodeName": pod.Spec.NodeName, - "FilesystemUtilizationPercentage": experimentsDetails.FilesystemUtilizationPercentage, - "FilesystemUtilizationBytes": experimentsDetails.FilesystemUtilizationBytes, - }) - - if err := createHelperPod(experimentsDetails, clients, chaosDetails, pod.Name, pod.Spec.NodeName, runID); err != nil { - return errors.Errorf("unable to create the helper pod, err: %v", err) - } - } - - appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID) - - //checking the status of the helper pod, wait till the pod comes to running state else fail the experiment - log.Info("[Status]: Checking the status of the helper pod") - if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, 
appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { - common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return errors.Errorf("helper pod is not in running state, err: %v", err) - } - for _, pod := range targetPodList.Items { - common.SetTargets(pod.Name, "targeted", "pod", chaosDetails) - } - - // Wait till the completion of helper pod - log.Info("[Wait]: Waiting till the completion of the helper pod") - podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, "pumba-stress") - if err != nil || podStatus == "Failed" { - common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) - return common.HelperFailedError(err) - } - - //Deleting the helper pod - log.Info("[Cleanup]: Deleting the helper pod") - if err := common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return errors.Errorf("unable to delete the helper pod, err: %v", err) - } - - return nil -} - -// createHelperPod derive the attributes for helper pod and create the helper pod -func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, appName, appNodeName, runID string) error { - - helperPod := &apiv1.Pod{ - ObjectMeta: v1.ObjectMeta{ - GenerateName: experimentsDetails.ExperimentName + "-helper-", - Namespace: experimentsDetails.ChaosNamespace, - Labels: common.GetHelperLabels(chaosDetails.Labels, runID, experimentsDetails.ExperimentName), - Annotations: chaosDetails.Annotations, - }, - Spec: apiv1.PodSpec{ - RestartPolicy: apiv1.RestartPolicyNever, - ImagePullSecrets: chaosDetails.ImagePullSecrets, - NodeName: appNodeName, - Volumes: []apiv1.Volume{ - { - Name: "dockersocket", - VolumeSource: apiv1.VolumeSource{ - HostPath: &apiv1.HostPathVolumeSource{ - Path: experimentsDetails.SocketPath, - }, - }, - }, - }, - Containers: []apiv1.Container{ - { - Name: "pumba-stress", - Image: experimentsDetails.LIBImage, - Command: []string{ - "sudo", - "-E", - }, - Args: getContainerArguments(experimentsDetails, appName), - Env: []apiv1.EnvVar{ - { - Name: "DOCKER_HOST", - Value: "unix://" + experimentsDetails.SocketPath, - }, - }, - Resources: chaosDetails.Resources, - VolumeMounts: []apiv1.VolumeMount{ - { - Name: "dockersocket", - MountPath: experimentsDetails.SocketPath, - }, - }, - ImagePullPolicy: apiv1.PullPolicy(experimentsDetails.LIBImagePullPolicy), - SecurityContext: &apiv1.SecurityContext{ - Capabilities: &apiv1.Capabilities{ - Add: []apiv1.Capability{ - "SYS_ADMIN", - }, - }, - }, - }, - }, - }, - } - - _, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Create(context.Background(), helperPod, v1.CreateOptions{}) - return err -} - -// getContainerArguments derives the args for the pumba stress helper pod -func getContainerArguments(experimentsDetails *experimentTypes.ExperimentDetails, appName string) []string { - - var hddbytes string - if experimentsDetails.FilesystemUtilizationBytes == "0" { - if experimentsDetails.FilesystemUtilizationPercentage == "0" { - hddbytes = "10%" - log.Info("Neither of FilesystemUtilizationPercentage or FilesystemUtilizationBytes provided, proceeding with a default FilesystemUtilizationPercentage value of 10%") - } else { - hddbytes = experimentsDetails.FilesystemUtilizationPercentage + "%" - } - } else { - if 
experimentsDetails.FilesystemUtilizationPercentage == "0" { - hddbytes = experimentsDetails.FilesystemUtilizationBytes + "G" - } else { - hddbytes = experimentsDetails.FilesystemUtilizationPercentage + "%" - log.Warn("Both FsUtilPercentage & FsUtilBytes provided as inputs, using the FsUtilPercentage value to proceed with stress exp") - } - } - - stressArgs := []string{ - "pumba", - "--log-level", - "debug", - "--label", - "io.kubernetes.pod.name=" + appName, - "stress", - "--duration", - strconv.Itoa(experimentsDetails.ChaosDuration) + "s", - "--stress-image", - experimentsDetails.StressImage, - "--stressors", - } - args := stressArgs - if experimentsDetails.VolumeMountPath == "" { - args = append(args, "--cpu 1 --io "+experimentsDetails.NumberOfWorkers+" --hdd "+experimentsDetails.NumberOfWorkers+" --hdd-bytes "+hddbytes+" --timeout "+strconv.Itoa(experimentsDetails.ChaosDuration)+"s") - } else { - args = append(args, "--cpu 1 --io "+experimentsDetails.NumberOfWorkers+" --hdd "+experimentsDetails.NumberOfWorkers+" --hdd-bytes "+hddbytes+" --temp-path "+experimentsDetails.VolumeMountPath+" --timeout "+strconv.Itoa(experimentsDetails.ChaosDuration)+"s") - } - return args -} diff --git a/experiments/aws-ssm/aws-ssm-chaos-by-id/experiment/aws-ssm-chaos-by-id.go b/experiments/aws-ssm/aws-ssm-chaos-by-id/experiment/aws-ssm-chaos-by-id.go index 8a44dad0a..253d3706b 100644 --- a/experiments/aws-ssm/aws-ssm-chaos-by-id/experiment/aws-ssm-chaos-by-id.go +++ b/experiments/aws-ssm/aws-ssm-chaos-by-id/experiment/aws-ssm-chaos-by-id.go @@ -40,7 +40,7 @@ func AWSSSMChaosByID(clients clients.ClientSets) { if experimentsDetails.EngineName != "" { // Initialize the probe details. Bail out upon error, as we haven't entered exp business logic yet if err := probe.InitializeProbesInChaosResultDetails(&chaosDetails, clients, &resultDetails); err != nil { - log.Errorf("Unable to initialize the probes, err: %v", err) + log.Errorf("Unable to initialize the probes: %v", err) return } } @@ -48,9 +48,8 @@ func AWSSSMChaosByID(clients clients.ClientSets) { //Updating the chaos result in the beginning of experiment log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { - log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of ec2 terminate experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("Unable to create the chaosresult: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -60,8 +59,9 @@ func AWSSSMChaosByID(clients clients.ClientSets) { // generating the event in chaosresult to marked the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") - + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosresult", types.AwaitedVerdict) + } // Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, 
clients, &resultDetails, &chaosDetails, &eventsDetails) @@ -81,72 +81,66 @@ func AWSSSMChaosByID(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { - log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() + log.Errorf("Probe Failed: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck) + } + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "AUT: Running, Probes: Successful" } // generating the events for the pre-chaos check types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck) + } } //Verify that the instance should have permission to perform ssm api calls if err := ssm.CheckInstanceInformation(&experimentsDetails); err != nil { - log.Errorf("failed perform ssm api calls, err: %v", err) - failStep := "[pre-chaos]: Failed to verify to make SSM api calls, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("Failed perform ssm api calls: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } if chaosDetails.DefaultHealthCheck { //Verify the aws ec2 instance is running (pre chaos) if err := ec2.InstanceStatusCheckByID(experimentsDetails.EC2InstanceID, experimentsDetails.Region); err != nil { - log.Errorf("failed to get the ec2 instance status, err: %v", err) - failStep := "[pre-chaos]: Failed to verify the AWS ec2 instance status, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("Failed to get the ec2 instance status: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Info("[Status]: EC2 instance is in running state") } - // Including the litmus lib for aws-ssm-chaos-by-id - switch experimentsDetails.ChaosLib { - case "litmus": - if err := litmusLIB.PrepareAWSSSMChaosByID(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - //Delete the ssm document on the given aws service monitoring docs - if experimentsDetails.IsDocsUploaded { - log.Info("[Recovery]: Delete the uploaded aws ssm docs") - if err := ssm.SSMDeleteDocument(experimentsDetails.DocumentName, experimentsDetails.Region); err != nil { - log.Errorf("fail to delete ssm doc, err: %v", err) - } + 
chaosDetails.Phase = types.ChaosInjectPhase + + if err := litmusLIB.PrepareAWSSSMChaosByID(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("Chaos injection failed: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) + //Delete the ssm document on the given aws service monitoring docs + if experimentsDetails.IsDocsUploaded { + log.Info("[Recovery]: Delete the uploaded aws ssm docs") + if err := ssm.SSMDeleteDocument(experimentsDetails.DocumentName, experimentsDetails.Region); err != nil { + log.Errorf("Failed to delete ssm doc: %v", err) } - return } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase + if chaosDetails.DefaultHealthCheck { //Verify the aws ec2 instance is running (post chaos) if err := ec2.InstanceStatusCheckByID(experimentsDetails.EC2InstanceID, experimentsDetails.Region); err != nil { - log.Errorf("failed to get the ec2 instance status, err: %v", err) - failStep := "[post-chaos]: Failed to verify the AWS ec2 instance status, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("Failed to get the ec2 instance status: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Info("[Status]: EC2 instance is in running state (post chaos)") @@ -159,12 +153,13 @@ func AWSSSMChaosByID(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { - log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() + log.Errorf("Probes Failed: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck) + } + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "AUT: Running, Probes: Successful" @@ -172,13 +167,15 @@ func AWSSSMChaosByID(clients clients.ClientSets) { // generating post chaos event types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck) + } } //Updating the chaosResult in the end of experiment log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err := 
result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { - log.Errorf("Unable to Update the Chaos Result, err: %v", err) + log.Errorf("Unable to update the chaosresult: %v", err) return } @@ -191,12 +188,14 @@ func AWSSSMChaosByID(clients clients.ClientSets) { eventType = "Warning" } types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") - + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosresult", reason) + } if experimentsDetails.EngineName != "" { msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed" types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.Summary) + } } - } diff --git a/experiments/aws-ssm/aws-ssm-chaos-by-tag/experiment/aws-ssm-chaos-by-tag.go b/experiments/aws-ssm/aws-ssm-chaos-by-tag/experiment/aws-ssm-chaos-by-tag.go index cf00746c9..6eb7d3812 100644 --- a/experiments/aws-ssm/aws-ssm-chaos-by-tag/experiment/aws-ssm-chaos-by-tag.go +++ b/experiments/aws-ssm/aws-ssm-chaos-by-tag/experiment/aws-ssm-chaos-by-tag.go @@ -40,7 +40,7 @@ func AWSSSMChaosByTag(clients clients.ClientSets) { if experimentsDetails.EngineName != "" { // Initialize the probe details. Bail out upon error, as we haven't entered exp business logic yet if err := probe.InitializeProbesInChaosResultDetails(&chaosDetails, clients, &resultDetails); err != nil { - log.Errorf("Unable to initialize the probes, err: %v", err) + log.Errorf("Unable to initialize the probes: %v", err) return } } @@ -48,9 +48,8 @@ func AWSSSMChaosByTag(clients clients.ClientSets) { //Updating the chaos result in the beginning of experiment log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { - log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of ec2 terminate experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("Unable to create the chaosresult: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -60,7 +59,9 @@ func AWSSSMChaosByTag(clients clients.ClientSets) { // generating the event in chaosresult to marked the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosresult", types.AwaitedVerdict) + } // Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, 
clients, &resultDetails, &chaosDetails, &eventsDetails) @@ -75,9 +76,8 @@ func AWSSSMChaosByTag(clients clients.ClientSets) { //Verify that the instance should have permission to perform ssm api calls if err := ssm.CheckInstanceInformation(&experimentsDetails); err != nil { - log.Errorf("target instance status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify the AWS ec2 instance status, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("Target instance status check failed: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -89,53 +89,49 @@ func AWSSSMChaosByTag(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { - log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() + log.Errorf("Probe Failed: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck) + } + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "AUT: Running, Probes: Successful" } // generating the events for the pre-chaos check types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck) + } } - // Including the litmus lib for aws-ssm-chaos-by-tag - switch experimentsDetails.ChaosLib { - case "litmus": - if err := litmusLIB.PrepareAWSSSMChaosByTag(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - //Delete the ssm document on the given aws service monitoring docs - if experimentsDetails.IsDocsUploaded { - log.Info("[Recovery]: Delete the uploaded aws ssm docs") - if err := ssm.SSMDeleteDocument(experimentsDetails.DocumentName, experimentsDetails.Region); err != nil { - log.Errorf("fail to delete ssm doc, err: %v", err) - } + chaosDetails.Phase = types.ChaosInjectPhase + + if err := litmusLIB.PrepareAWSSSMChaosByTag(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("Chaos injection failed: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) + //Delete the ssm document on the given aws service monitoring docs + if experimentsDetails.IsDocsUploaded { + log.Info("[Recovery]: Delete the uploaded aws ssm docs") + if err := ssm.SSMDeleteDocument(experimentsDetails.DocumentName, experimentsDetails.Region); err != nil { 
+ log.Errorf("Failed to delete ssm document: %v", err) } - return } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase + if chaosDetails.DefaultHealthCheck { //Verify the aws ec2 instance is running (post chaos) if err := ec2.InstanceStatusCheck(experimentsDetails.TargetInstanceIDList, experimentsDetails.Region); err != nil { - log.Errorf("failed to get the ec2 instance status, err: %v", err) - failStep := "[post-chaos]: Failed to verify the AWS ec2 instance status, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("Failed to get the ec2 instance status: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Info("[Status]: EC2 instance is in running state (post chaos)") @@ -148,12 +144,13 @@ func AWSSSMChaosByTag(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { - log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() + log.Errorf("Probes Failed: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck) + } + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "AUT: Running, Probes: Successful" @@ -161,13 +158,14 @@ func AWSSSMChaosByTag(clients clients.ClientSets) { // generating post chaos event types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck) + } } - //Updating the chaosResult in the end of experiment log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { - log.Errorf("Unable to Update the Chaos Result, err: %v", err) + log.Errorf("Unable to update the chaosresult: %v", err) return } @@ -180,12 +178,14 @@ func AWSSSMChaosByTag(clients clients.ClientSets) { eventType = "Warning" } types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") - + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); eventErr != nil { + log.Errorf("Failed 
to create %v event inside chaosresult", reason) + } if experimentsDetails.EngineName != "" { msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed" types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.Summary) + } } - } diff --git a/experiments/azure/azure-disk-loss/experiment/azure-disk-loss.go b/experiments/azure/azure-disk-loss/experiment/azure-disk-loss.go index 6e0408767..f50092c54 100644 --- a/experiments/azure/azure-disk-loss/experiment/azure-disk-loss.go +++ b/experiments/azure/azure-disk-loss/experiment/azure-disk-loss.go @@ -41,7 +41,7 @@ func AzureDiskLoss(clients clients.ClientSets) { if experimentsDetails.EngineName != "" { // Initialize the probe details. Bail out upon error, as we haven't entered exp business logic yet if err = probe.InitializeProbesInChaosResultDetails(&chaosDetails, clients, &resultDetails); err != nil { - log.Errorf("Unable to initialize the probes, err: %v", err) + log.Errorf("Unable to initialize the probes: %v", err) return } } @@ -49,9 +49,8 @@ func AzureDiskLoss(clients clients.ClientSets) { //Updating the chaos result in the beginning of experiment log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { - log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of azure disk loss experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("Unable to create the chaosresult: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -61,7 +60,9 @@ func AzureDiskLoss(clients clients.ClientSets) { // generating the event in chaosresult to marked the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosresults", types.AwaitedVerdict) + } // Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails) @@ -76,9 +77,8 @@ func AzureDiskLoss(clients clients.ClientSets) { // Setting up Azure Subscription ID if experimentsDetails.SubscriptionID, err = azureCommon.GetSubscriptionID(); err != nil { - log.Errorf("fail to get the subscription id, err: %v", err) - failStep := "[pre-chaos]: Failed to get the subscription ID for authentication, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("fail to get the subscription id: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -86,9 
+86,8 @@ func AzureDiskLoss(clients clients.ClientSets) { if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the virtual disk are attached to VM instance(pre-chaos)") if err = azureStatus.CheckVirtualDiskWithInstance(experimentsDetails.SubscriptionID, experimentsDetails.VirtualDiskNames, experimentsDetails.ResourceGroup); err != nil { - log.Errorf("Virtual disk status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the virtual disk are attached to VM instance, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("Virtual disk status check failed: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -101,47 +100,43 @@ func AzureDiskLoss(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { - log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() + log.Errorf("Probe Failed: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck) + } + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "AUT: Running, Probes: Successful" } // generating the events for the pre-chaos check types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck) + } } - // Including the litmus lib - switch experimentsDetails.ChaosLib { - case "litmus": - if err = litmusLIB.PrepareChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - log.Errorf("Chaos injection failed, err: %v", err) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + + if err = litmusLIB.PrepareChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) + log.Errorf("Chaos injection failed: %v", err) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase + // POST-CHAOS VIRTUAL DISK STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that 
the virtual disk are attached to VM instance(post-chaos)") if err = azureStatus.CheckVirtualDiskWithInstance(experimentsDetails.SubscriptionID, experimentsDetails.VirtualDiskNames, experimentsDetails.ResourceGroup); err != nil { - log.Errorf("Virtual disk status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the virtual disk are attached to VM instance, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("Virtual disk status check failed: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -153,12 +148,13 @@ func AzureDiskLoss(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { - log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() + log.Errorf("Probes Failed: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck) + } + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "AUT: Running, Probes: Successful" @@ -166,13 +162,15 @@ func AzureDiskLoss(clients clients.ClientSets) { // generating post chaos event types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck) + } } //Updating the chaosResult in the end of experiment log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { - log.Errorf("Unable to Update the Chaos Result, err: %v", err) + log.Errorf("Unable to update the chaosresult: %v", err) return } @@ -185,11 +183,15 @@ func AzureDiskLoss(clients clients.ClientSets) { eventType = "Warning" } types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosresults", reason) + } if experimentsDetails.EngineName != "" { msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed" types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", 
types.Summary) + } } } diff --git a/experiments/azure/instance-stop/experiment/azure-instance-stop.go b/experiments/azure/instance-stop/experiment/azure-instance-stop.go index 16b99a7e9..5993c37d6 100644 --- a/experiments/azure/instance-stop/experiment/azure-instance-stop.go +++ b/experiments/azure/instance-stop/experiment/azure-instance-stop.go @@ -42,7 +42,7 @@ func AzureInstanceStop(clients clients.ClientSets) { if experimentsDetails.EngineName != "" { // Initialize the probe details. Bail out upon error, as we haven't entered exp business logic yet if err = probe.InitializeProbesInChaosResultDetails(&chaosDetails, clients, &resultDetails); err != nil { - log.Errorf("Unable to initialize the probes, err: %v", err) + log.Errorf("Unable to initialize the probes: %v", err) } } @@ -50,9 +50,8 @@ func AzureInstanceStop(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT") if err != nil { - log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of azure instance stop experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("Unable to create the chaosresult: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -72,16 +71,17 @@ func AzureInstanceStop(clients clients.ClientSets) { // Setting up Azure Subscription ID if experimentsDetails.SubscriptionID, err = azureCommon.GetSubscriptionID(); err != nil { - log.Errorf("fail to get the subscription id, err: %v", err) - failStep := "[pre-chaos]: Failed to get the subscription ID for authentication, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("Failed to get the subscription id: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } // generating the event in chaosresult to marked the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResults"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosresults", types.AwaitedVerdict) + } if experimentsDetails.EngineName != "" { // marking AUT as running, as we already checked the status of application under test @@ -92,57 +92,52 @@ func AzureInstanceStop(clients clients.ClientSets) { err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails) if err != nil { - log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() + log.Errorf("Probe Failed: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed 
to create %v event inside chaosengine", types.PreChaosCheck) + } + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "AUT: Running, Probes: Successful" } // generating the events for the pre-chaos check types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck) + } } //Verify the azure target instance is running (pre-chaos) if chaosDetails.DefaultHealthCheck { if err = azureStatus.InstanceStatusCheckByName(experimentsDetails.AzureInstanceNames, experimentsDetails.ScaleSet, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup); err != nil { - log.Errorf("failed to get the azure instance status, err: %v", err) - failStep := "[pre-chaos]: Failed to verify the azure instance status, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("Azure instance status check failed: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Info("[Status]: Azure instance(s) is in running state (pre-chaos)") } - // Including the litmus lib for azure instance stopping - switch experimentsDetails.ChaosLib { - case "litmus": - if err = litmusLIB.PrepareAzureStop(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + + if err = litmusLIB.PrepareAzureStop(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("Chaos injection failed: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Info("[Confirmation]: Azure instance stop chaos has been injected successfully") resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase + //Verify the azure instance is running (post chaos) if chaosDetails.DefaultHealthCheck { if err = azureStatus.InstanceStatusCheckByName(experimentsDetails.AzureInstanceNames, experimentsDetails.ScaleSet, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup); err != nil { - log.Errorf("failed to get the azure instance status, err: %v", err) - failStep := "[pre-chaos]: Failed to update the azure instance status, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("Azure instance status check failed: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Info("[Status]: Azure instance is in running state (post chaos)") @@ -156,12 +151,13 @@ func AzureInstanceStop(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { err = probe.RunProbes(&chaosDetails, 
clients, &resultDetails, "PostChaos", &eventsDetails) if err != nil { - log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() + log.Errorf("Probes Failed: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck) + } + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "AUT: Running, Probes: Successful" @@ -169,14 +165,16 @@ func AzureInstanceStop(clients clients.ClientSets) { // generating post chaos event types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck) + } } //Updating the chaosResult in the end of experiment log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT") if err != nil { - log.Errorf("Unable to Update the Chaos Result, err: %v", err) + log.Errorf("Unable to update the chaosresult: %v", err) } // generating the event in chaosresult to marked the verdict as pass/fail @@ -188,12 +186,16 @@ func AzureInstanceStop(clients clients.ClientSets) { eventType = "Warning" } types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResults"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosresults", reason) + } if experimentsDetails.EngineName != "" { msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed" types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.Summary) + } } } diff --git a/experiments/baremetal/redfish-node-restart/experiment/redfish-node-restart.go b/experiments/baremetal/redfish-node-restart/experiment/redfish-node-restart.go index 1da9e189f..e0c4b46c0 100644 --- a/experiments/baremetal/redfish-node-restart/experiment/redfish-node-restart.go +++ b/experiments/baremetal/redfish-node-restart/experiment/redfish-node-restart.go @@ -49,8 +49,7 @@ func NodeRestart(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of 
redfish-node-restart experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -76,8 +75,7 @@ func NodeRestart(clients clients.ClientSets) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -87,8 +85,7 @@ func NodeRestart(clients clients.ClientSets) { log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)") if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { log.Errorf("Auxiliary Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -97,14 +94,12 @@ func NodeRestart(clients clients.ClientSets) { log.Info("[Status]: Verify that the NUT (Node Under Test) is running (pre-chaos)") nodeStatus, err := redfishLib.GetNodeStatus(experimentsDetails.IPMIIP, experimentsDetails.User, experimentsDetails.Password) if err != nil { - failStep := "[pre-chaos]: Failed to verify that the NUT (Node Under Test) is running, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) log.Errorf("[Verification]: Unable to get node power status(pre-chaos). 
Error: %v", err) return } if nodeStatus != "On" { - failStep := "[pre-chaos]: Failed to verify that the NUT (Node Under Test) is running" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) log.Errorf("[Verification]: Node is not in running state(pre-chaos)") return } @@ -119,11 +114,10 @@ func NodeRestart(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := "NUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "NUT: Running, Probes: Successful" @@ -133,32 +127,25 @@ func NodeRestart(clients clients.ClientSets) { events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - // Including the litmus lib - switch experimentsDetails.ChaosLib { - case "litmus": - if err := litmusLIB.PrepareChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - log.Errorf("Chaos injection failed, err: %v", err) - return - } - default: - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - log.Error("lib not supported, provide the correct value of lib") + chaosDetails.Phase = types.ChaosInjectPhase + + if err := litmusLIB.PrepareChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) + log.Errorf("Chaos injection failed, err: %v", err) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase + //POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err = status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -168,8 +155,7 @@ func NodeRestart(clients clients.ClientSets) { log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)") if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { log.Errorf("Auxiliary Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the 
Auxiliary Applications are running, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -178,14 +164,12 @@ func NodeRestart(clients clients.ClientSets) { log.Info("[Status]: Verify that the NUT (Node Under Test) is running (post-chaos)") nodeStatus, err = redfishLib.GetNodeStatus(experimentsDetails.IPMIIP, experimentsDetails.User, experimentsDetails.Password) if err != nil { - failStep := "[post-chaos]: Failed to verify that the NUT (Node Under Test) is running, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) log.Errorf("[Verification]: Unable to get node power status. Error: %v ", err) return } if nodeStatus != "On" { - failStep := "[post-chaos]: Failed to verify that the NUT (Node Under Test) is running" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) log.Errorf("[Verification]: Node is not in running state(post-chaos)") return } @@ -199,11 +183,10 @@ func NodeRestart(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := "NUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "NUT: Running, Probes: Successful" diff --git a/experiments/cassandra/pod-delete/experiment/pod-delete.go b/experiments/cassandra/pod-delete/experiment/pod-delete.go index a2dbf437c..02157c62e 100644 --- a/experiments/cassandra/pod-delete/experiment/pod-delete.go +++ b/experiments/cassandra/pod-delete/experiment/pod-delete.go @@ -51,8 +51,7 @@ func CasssandraPodDelete(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ChaoslibDetail.ExperimentName) if err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of cassandra pod-delete experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -81,10 +80,9 @@ func CasssandraPodDelete(clients clients.ClientSets) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err = status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not 
Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -92,8 +90,7 @@ func CasssandraPodDelete(clients clients.ClientSets) { log.Info("[Status]: Checking the load distribution on the ring (pre-chaos)") if err = cassandra.NodeToolStatusCheck(&experimentsDetails, clients); err != nil { log.Errorf("[Status]: Chaos node tool status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to check the load distribution on the ring, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -107,11 +104,10 @@ func CasssandraPodDelete(clients clients.ClientSets) { if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -126,8 +122,7 @@ func CasssandraPodDelete(clients clients.ClientSets) { ResourceVersionBefore, err = cassandra.LivenessCheck(&experimentsDetails, clients) if err != nil { log.Errorf("[Liveness]: Cassandra liveness check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to create liveness pod, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Info("[Confirmation]: The cassandra application liveness pod created successfully") @@ -135,34 +130,27 @@ func CasssandraPodDelete(clients clients.ClientSets) { log.Warn("[Liveness]: Cassandra Liveness check skipped as it was not enable") } - // Including the litmus lib for cassandra-pod-delete - switch experimentsDetails.ChaoslibDetail.ChaosLib { - case "litmus": - if err = litmusLIB.PreparePodDelete(experimentsDetails.ChaoslibDetail, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + + if err = litmusLIB.PreparePodDelete(experimentsDetails.ChaoslibDetail, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("Chaos injection failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, 
&resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ChaoslibDetail.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase + //POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err = status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -170,8 +158,7 @@ func CasssandraPodDelete(clients clients.ClientSets) { log.Info("[Status]: Checking the load distribution on the ring (post-chaos)") if err = cassandra.NodeToolStatusCheck(&experimentsDetails, clients); err != nil { log.Errorf("[Status]: Chaos node tool status check is failed, err: %v", err) - failStep := "[post-check] Failed to check for load distribution on the ring, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -184,11 +171,10 @@ func CasssandraPodDelete(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -205,14 +191,12 @@ func CasssandraPodDelete(clients clients.ClientSets) { if experimentsDetails.CassandraLivenessCheck == "enable" { if err = status.CheckApplicationStatusesByLabels(experimentsDetails.ChaoslibDetail.AppNS, "name=cassandra-liveness-deploy-"+experimentsDetails.RunID, experimentsDetails.ChaoslibDetail.Timeout, experimentsDetails.ChaoslibDetail.Delay, clients); err != nil { log.Errorf("Liveness status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to check the status of liveness pod, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } if err = cassandra.LivenessCleanup(&experimentsDetails, clients, ResourceVersionBefore); err != nil { log.Errorf("Liveness cleanup failed, err: %v", err) - failStep := "[post-chaos]: Failed to delete liveness pod, err: " + err.Error() - 
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } diff --git a/experiments/gcp/gcp-vm-disk-loss-by-label/experiment/gcp-vm-disk-loss-by-label.go b/experiments/gcp/gcp-vm-disk-loss-by-label/experiment/gcp-vm-disk-loss-by-label.go index 110565499..ebd1a7928 100644 --- a/experiments/gcp/gcp-vm-disk-loss-by-label/experiment/gcp-vm-disk-loss-by-label.go +++ b/experiments/gcp/gcp-vm-disk-loss-by-label/experiment/gcp-vm-disk-loss-by-label.go @@ -54,8 +54,7 @@ func GCPVMDiskLossByLabel(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of gcp-vm-disk-loss-by-label experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -86,11 +85,10 @@ func GCPVMDiskLossByLabel(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "AUT: Running, Probes: Successful" @@ -104,47 +102,38 @@ func GCPVMDiskLossByLabel(clients clients.ClientSets) { computeService, err = gcp.GetGCPComputeService() if err != nil { log.Errorf("Failed to obtain a gcp compute service, err: %v", err) - failStep := "[pre-chaos]: Failed to obtain a gcp compute service, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } //selecting the target instances (pre-chaos) if err := gcp.SetTargetDiskVolumes(computeService, &experimentsDetails); err != nil { log.Errorf("Failed to get the target gcp disk volumes, err: %v", err) - failStep := "[pre-chaos]: Failed to select the target disk volumes from label, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Info("[Status]: Disk volumes are attached to the VM instances (pre-chaos)") - // Including the litmus lib - switch experimentsDetails.ChaosLib { - case "litmus": - if err := litmusLIB.PrepareDiskVolumeLossByLabel(computeService, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - 
log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match found for specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + + if err := litmusLIB.PrepareDiskVolumeLossByLabel(computeService, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("Chaos injection failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase + // Checking disk volume attachment post-chaos for i := range experimentsDetails.TargetDiskVolumeNamesList { instanceName, err := gcp.GetVolumeAttachmentDetails(computeService, experimentsDetails.GCPProjectID, experimentsDetails.Zones, experimentsDetails.TargetDiskVolumeNamesList[i]) if err != nil || instanceName == "" { log.Errorf("Failed to verify disk volume attachment status, err: %v", err) - failStep := "[post-chaos]: Failed to verify disk volume attachment status, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -159,11 +148,10 @@ func GCPVMDiskLossByLabel(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "AUT: Running, Probes: Successful" diff --git a/experiments/gcp/gcp-vm-disk-loss/experiment/gcp-vm-disk-loss.go b/experiments/gcp/gcp-vm-disk-loss/experiment/gcp-vm-disk-loss.go index d3682828d..1909cbc4b 100644 --- a/experiments/gcp/gcp-vm-disk-loss/experiment/gcp-vm-disk-loss.go +++ b/experiments/gcp/gcp-vm-disk-loss/experiment/gcp-vm-disk-loss.go @@ -54,8 +54,7 @@ func VMDiskLoss(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of gcp disk loss experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -86,11 +85,10 @@ func VMDiskLoss(clients clients.ClientSets) { if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := "AUT: Running, Probes: Unsuccessful" 
types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "AUT: Running, Probes: Successful" @@ -104,8 +102,7 @@ func VMDiskLoss(clients clients.ClientSets) { computeService, err = gcp.GetGCPComputeService() if err != nil { log.Errorf("Failed to obtain a gcp compute service, err: %v", err) - failStep := "[pre-chaos]: Failed to obtain a gcp compute service, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -113,8 +110,7 @@ func VMDiskLoss(clients clients.ClientSets) { if chaosDetails.DefaultHealthCheck { if err := gcp.DiskVolumeStateCheck(computeService, &experimentsDetails); err != nil { log.Errorf("Volume status check failed pre chaos, err: %v", err) - failStep := "[pre-chaos]: Failed to verify if the disk volume is attached to an instance, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Info("[Status]: Disk volumes are attached to the VM instances (pre-chaos)") @@ -123,36 +119,28 @@ func VMDiskLoss(clients clients.ClientSets) { // Fetch target disk instance names if err := gcp.SetTargetDiskInstanceNames(computeService, &experimentsDetails); err != nil { log.Errorf("Failed to fetch the disk instance names, err: %v", err) - failStep := "[pre-chaos]: Failed to fetch the disk instance names, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } - // Including the litmus lib for disk-loss - switch experimentsDetails.ChaosLib { - case "litmus": - if err = litmusLIB.PrepareDiskVolumeLoss(computeService, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + + if err = litmusLIB.PrepareDiskVolumeLoss(computeService, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("Chaos injection failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase + //Verify the vm instance is attached to disk volume if chaosDetails.DefaultHealthCheck { if err := gcp.DiskVolumeStateCheck(computeService, &experimentsDetails); err != nil { log.Errorf("Volume status check failed post 
chaos, err: %v", err) - failStep := "[post-chaos]: Failed to verify if the disk volume is attached to an instance, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Info("[Status]: Disk volumes are attached to the VM instances (post-chaos)") @@ -166,11 +154,10 @@ func VMDiskLoss(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "AUT: Running, Probes: Successful" diff --git a/experiments/gcp/gcp-vm-instance-stop-by-label/experiment/gcp-vm-instance-stop-by-label.go b/experiments/gcp/gcp-vm-instance-stop-by-label/experiment/gcp-vm-instance-stop-by-label.go index 47ad0e323..a9c704f76 100644 --- a/experiments/gcp/gcp-vm-instance-stop-by-label/experiment/gcp-vm-instance-stop-by-label.go +++ b/experiments/gcp/gcp-vm-instance-stop-by-label/experiment/gcp-vm-instance-stop-by-label.go @@ -54,8 +54,7 @@ func GCPVMInstanceStopByLabel(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of gcp-vm-instance-stop-by-label experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -87,11 +86,10 @@ func GCPVMInstanceStopByLabel(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "AUT: Running, Probes: Successful" @@ -105,46 +103,37 @@ func GCPVMInstanceStopByLabel(clients clients.ClientSets) { computeService, err = gcp.GetGCPComputeService() if err != nil { log.Errorf("Failed to obtain a gcp compute service, err: %v", err) - failStep := "[pre-chaos]: Failed to obtain a gcp compute service, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } //selecting 
the target instances (pre-chaos) if err = gcp.SetTargetInstance(computeService, &experimentsDetails); err != nil { log.Errorf("Failed to get the target VM instances, err: %v", err) - failStep := "[pre-chaos]: Failed to select the target VM instances from label, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Info("[Status]: VM instances are in a running state (pre-chaos)") - // Including the litmus lib - switch experimentsDetails.ChaosLib { - case "litmus": - if err := litmusLIB.PrepareVMStopByLabel(computeService, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match found for specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + + if err := litmusLIB.PrepareVMStopByLabel(computeService, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("Chaos injection failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase + // Verify that GCP VM instance is running (post-chaos) if experimentsDetails.ManagedInstanceGroup != "enable" { if err := gcp.InstanceStatusCheck(computeService, experimentsDetails.TargetVMInstanceNameList, experimentsDetails.GCPProjectID, []string{experimentsDetails.Zones}); err != nil { log.Errorf("Failed to get VM instance status, err: %v", err) - failStep := "[post-chaos]: Failed to get VM instance status, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -159,11 +148,10 @@ func GCPVMInstanceStopByLabel(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "AUT: Running, Probes: Successful" diff --git a/experiments/gcp/gcp-vm-instance-stop/experiment/gcp-vm-instance-stop.go b/experiments/gcp/gcp-vm-instance-stop/experiment/gcp-vm-instance-stop.go index 70db32194..48520c474 100644 --- a/experiments/gcp/gcp-vm-instance-stop/experiment/gcp-vm-instance-stop.go +++ 
b/experiments/gcp/gcp-vm-instance-stop/experiment/gcp-vm-instance-stop.go @@ -54,8 +54,7 @@ func VMInstanceStop(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of gcp vm instance stop experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -86,11 +85,10 @@ func VMInstanceStop(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "AUT: Running, Probes: Successful" @@ -104,48 +102,39 @@ func VMInstanceStop(clients clients.ClientSets) { computeService, err = gcp.GetGCPComputeService() if err != nil { log.Errorf("Failed to obtain a gcp compute service, err: %v", err) - failStep := "[pre-chaos]: Failed to obtain a gcp compute service, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } // Verify that the GCP VM instance(s) is in RUNNING state (pre-chaos) if chaosDetails.DefaultHealthCheck { if err := gcp.InstanceStatusCheckByName(computeService, experimentsDetails.ManagedInstanceGroup, experimentsDetails.Delay, experimentsDetails.Timeout, "pre-chaos", experimentsDetails.VMInstanceName, experimentsDetails.GCPProjectID, experimentsDetails.Zones); err != nil { - log.Errorf("failed to get the vm instance status, err: %v", err) - failStep := "[pre-chaos]: Failed to verify the GCP VM instance status, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("Failed to get the vm instance status, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Info("[Status]: VM instance is in running state (pre-chaos)") } - // Including the litmus lib for GCP vm-instance-stop - switch experimentsDetails.ChaosLib { - case "litmus": - if err := litmusLIB.PrepareVMStop(computeService, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + 
chaosDetails.Phase = types.ChaosInjectPhase + + if err := litmusLIB.PrepareVMStop(computeService, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("Chaos injection failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase + //Verify the GCP VM instance is in RUNNING status (post-chaos) if chaosDetails.DefaultHealthCheck { if err := gcp.InstanceStatusCheckByName(computeService, experimentsDetails.ManagedInstanceGroup, experimentsDetails.Delay, experimentsDetails.Timeout, "post-chaos", experimentsDetails.VMInstanceName, experimentsDetails.GCPProjectID, experimentsDetails.Zones); err != nil { log.Errorf("failed to get the vm instance status, err: %v", err) - failStep := "[post-chaos]: Failed to verify the GCP VM instance status, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -160,11 +149,10 @@ func VMInstanceStop(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "AUT: Running, Probes: Successful" diff --git a/experiments/generic/container-kill/experiment/container-kill.go b/experiments/generic/container-kill/experiment/container-kill.go index a724d3ce0..846d4c805 100644 --- a/experiments/generic/container-kill/experiment/container-kill.go +++ b/experiments/generic/container-kill/experiment/container-kill.go @@ -5,7 +5,6 @@ import ( "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/container-kill/lib" - pumbaLIB "github.com/litmuschaos/litmus-go/chaoslib/pumba/container-kill/lib" clients "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/container-kill/environment" @@ -49,8 +48,7 @@ func ContainerKill(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of container-kill experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -64,11 +62,10 @@ func ContainerKill(clients clients.ClientSets) { //DISPLAY THE APP 
INFORMATION log.InfoWithValues("The application information is as follows", logrus.Fields{ - "Namespace": experimentsDetails.AppNS, - "Label": experimentsDetails.AppLabel, - "Target Container": experimentsDetails.TargetContainer, - "Chaos Duration": experimentsDetails.ChaosDuration, - "Container Runtime": experimentsDetails.ContainerRuntime, + "Targets": common.GetAppDetailsForLogging(chaosDetails.AppDetail), + "Target Container": experimentsDetails.TargetContainer, + "Chaos Duration": experimentsDetails.ChaosDuration, + "Chaos Interval": experimentsDetails.ChaosInterval, }) // Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result @@ -79,10 +76,9 @@ func ContainerKill(clients clients.ClientSets) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -96,11 +92,10 @@ func ContainerKill(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -110,41 +105,25 @@ func ContainerKill(clients clients.ClientSets) { events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - // Including the litmus lib for container-kill - switch { - case experimentsDetails.ChaosLib == "litmus": - if err := litmusLIB.PrepareContainerKill(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - log.Errorf("Chaos injection failed, err: %v", err) - return - } - case experimentsDetails.ChaosLib == "pumba" && experimentsDetails.ContainerRuntime == "docker": - if err := pumbaLIB.PrepareContainerKill(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - log.Errorf("Chaos injection failed, err: %v", err) - return - } - default: - failStep := "[chaos]: lib and container-runtime combination not supported!" 
- result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - log.Error("lib and container-runtime combination not supported, provide the correct value of lib & container-runtime") + chaosDetails.Phase = types.ChaosInjectPhase + if err := litmusLIB.PrepareContainerKill(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("Chaos injection failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase //POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -157,11 +136,10 @@ func ContainerKill(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -176,10 +154,11 @@ func ContainerKill(clients clients.ClientSets) { log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { log.Errorf("Unable to Update the Chaos Result, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } - // generating the event in chaosresult to marked the verdict as pass/fail + // generating the event in chaosresult to mark the verdict as pass/fail msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict) reason := types.PassVerdict eventType := "Normal" diff --git a/experiments/generic/disk-fill/experiment/disk-fill.go b/experiments/generic/disk-fill/experiment/disk-fill.go index b72736778..1b78a3115 100644 --- a/experiments/generic/disk-fill/experiment/disk-fill.go +++ b/experiments/generic/disk-fill/experiment/disk-fill.go @@ -47,8 +47,7 @@ func DiskFill(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the 
chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of disk-fill experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -62,7 +61,7 @@ func DiskFill(clients clients.ClientSets) { //DISPLAY THE APP INFORMATION log.InfoWithValues("The application information is as follows", logrus.Fields{ - "AppDetails": chaosDetails.AppDetail, + "Targets": common.GetAppDetailsForLogging(chaosDetails.AppDetail), "Fill Percentage": experimentsDetails.FillPercentage, "Chaos Duration": experimentsDetails.ChaosDuration, }) @@ -75,10 +74,9 @@ func DiskFill(clients clients.ClientSets) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -92,11 +90,10 @@ func DiskFill(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -106,34 +103,25 @@ func DiskFill(clients clients.ClientSets) { events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - // Including the litmus lib for disk-fill - switch experimentsDetails.ChaosLib { - case "litmus": - if err := litmusLIB.PrepareDiskFill(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + if err := litmusLIB.PrepareDiskFill(&experimentsDetails, clients, &resultDetails, &eventsDetails, 
&chaosDetails); err != nil { + log.Errorf("Chaos injection failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase //POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -146,11 +134,10 @@ func DiskFill(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -165,10 +152,11 @@ func DiskFill(clients clients.ClientSets) { log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { log.Errorf("Unable to Update the Chaos Result err: %v\n", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } - // generating the event in chaosresult to marked the verdict as pass/fail + // generating the event in chaosresult to mark the verdict as pass/fail msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict) reason := types.PassVerdict eventType := "Normal" diff --git a/experiments/generic/docker-service-kill/experiment/docker-service-kill.go b/experiments/generic/docker-service-kill/experiment/docker-service-kill.go index 53cb850a1..4dd4b997e 100644 --- a/experiments/generic/docker-service-kill/experiment/docker-service-kill.go +++ b/experiments/generic/docker-service-kill/experiment/docker-service-kill.go @@ -48,15 +48,14 @@ func DockerServiceKill(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result 
of docker-service-kill experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } // Set the chaos result uid result.SetResultUID(&resultDetails, clients, &chaosDetails) - // generating the event in chaosresult to marked the verdict as awaited + // generating the event in chaosresult to mark the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") @@ -76,8 +75,7 @@ func DockerServiceKill(clients clients.ClientSets) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -86,8 +84,7 @@ func DockerServiceKill(clients clients.ClientSets) { log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)") if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { log.Errorf("Auxiliary Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -96,10 +93,9 @@ func DockerServiceKill(clients clients.ClientSets) { log.Info("[Status]: Getting the status of target nodes") if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { log.Errorf("Target nodes are not in the ready state, err: %v", err) - failStep := "[pre-chaos]: Failed to check the status of nodes, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -113,11 +109,10 @@ func DockerServiceKill(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := "NUT: Ready, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, 
&eventsDetails) return } msg = "NUT: Ready, Probes: Successful" @@ -127,32 +122,23 @@ func DockerServiceKill(clients clients.ClientSets) { events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - // Including the litmus lib for docker-service-kill - switch experimentsDetails.ChaosLib { - case "litmus": - if err := litmusLIB.PrepareDockerServiceKill(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - log.Errorf("Chaos injection failed, err: %v", err) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + if err := litmusLIB.PrepareDockerServiceKill(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) + log.Errorf("Chaos injection failed, err: %v", err) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase //POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -161,8 +147,7 @@ func DockerServiceKill(clients clients.ClientSets) { log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)") if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { log.Errorf("Auxiliary Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the Auxiliary Applications are running, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -184,11 +169,10 @@ func DockerServiceKill(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := "NUT: Ready, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "NUT: Ready, Probes: 
Successful" @@ -203,6 +187,7 @@ func DockerServiceKill(clients clients.ClientSets) { log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { log.Errorf("Unable to Update the Chaos Result, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } diff --git a/experiments/generic/kubelet-service-kill/experiment/kubelet-service-kill.go b/experiments/generic/kubelet-service-kill/experiment/kubelet-service-kill.go index dcd053982..a381637c8 100644 --- a/experiments/generic/kubelet-service-kill/experiment/kubelet-service-kill.go +++ b/experiments/generic/kubelet-service-kill/experiment/kubelet-service-kill.go @@ -48,15 +48,14 @@ func KubeletServiceKill(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of kubelet-service-kill experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } // Set the chaos result uid result.SetResultUID(&resultDetails, clients, &chaosDetails) - // generating the event in chaosresult to marked the verdict as awaited + // generating the event in chaosresult to mark the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") @@ -76,8 +75,7 @@ func KubeletServiceKill(clients clients.ClientSets) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -86,8 +84,7 @@ func KubeletServiceKill(clients clients.ClientSets) { log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)") if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { log.Errorf("Auxiliary Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -96,10 +93,9 @@ func KubeletServiceKill(clients clients.ClientSets) { log.Info("[Status]: Getting the status of target nodes") if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { log.Errorf("Target 
nodes are not in the ready state, err: %v", err) - failStep := "[pre-chaos]: Failed to verify the status of nodes, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -113,11 +109,10 @@ func KubeletServiceKill(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := "NUT: Ready, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "NUT: Ready, Probes: Successful" @@ -127,32 +122,23 @@ func KubeletServiceKill(clients clients.ClientSets) { events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - // Including the litmus lib for kubelet-service-kill - switch experimentsDetails.ChaosLib { - case "litmus": - if err := litmusLIB.PrepareKubeletKill(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + if err := litmusLIB.PrepareKubeletKill(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("Chaos injection failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase //POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -161,8 +147,7 @@ func KubeletServiceKill(clients clients.ClientSets) { log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)") if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, 
experimentsDetails.Delay, clients); err != nil { log.Errorf("Auxiliary Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the Auxiliary Applications are running, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -184,11 +169,10 @@ func KubeletServiceKill(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := "NUT: Ready, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "NUT: Ready, Probes: Successful" @@ -203,6 +187,7 @@ func KubeletServiceKill(clients clients.ClientSets) { log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { log.Errorf("Unable to Update the Chaos Result, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } diff --git a/experiments/generic/node-cpu-hog/experiment/node-cpu-hog.go b/experiments/generic/node-cpu-hog/experiment/node-cpu-hog.go index 42902eb69..b565ef051 100644 --- a/experiments/generic/node-cpu-hog/experiment/node-cpu-hog.go +++ b/experiments/generic/node-cpu-hog/experiment/node-cpu-hog.go @@ -48,15 +48,14 @@ func NodeCPUHog(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of node-cpu-hog experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } // Set the chaos result uid result.SetResultUID(&resultDetails, clients, &chaosDetails) - // generating the event in chaosresult to marked the verdict as awaited + // generating the event in chaosresult to mark the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") @@ -77,8 +76,7 @@ func NodeCPUHog(clients clients.ClientSets) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, 
failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -87,8 +85,7 @@ func NodeCPUHog(clients clients.ClientSets) { log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)") if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { log.Errorf("Auxiliary Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -97,10 +94,9 @@ func NodeCPUHog(clients clients.ClientSets) { log.Info("[Status]: Getting the status of target nodes") if err := status.CheckNodeStatus(experimentsDetails.TargetNodes, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { log.Errorf("Target nodes are not in the ready state, err: %v", err) - failStep := "[pre-chaos]: Failed to verify the status of nodes, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -114,11 +110,10 @@ func NodeCPUHog(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := "NUT: Ready, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "NUT: Ready, Probes: Successful" @@ -128,32 +123,23 @@ func NodeCPUHog(clients clients.ClientSets) { events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - // Including the litmus lib for node-cpu-hog - switch experimentsDetails.ChaosLib { - case "litmus": - if err := litmusLIB.PrepareNodeCPUHog(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("[Error]: CPU hog failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + if err := litmusLIB.PrepareNodeCPUHog(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("[Error]: CPU hog failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } 
log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase //POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Infof("Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -162,8 +148,7 @@ func NodeCPUHog(clients clients.ClientSets) { log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)") if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { log.Errorf("Auxiliary Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the Auxiliary Applications are running, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -185,11 +170,10 @@ func NodeCPUHog(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := "NUT: Ready, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "NUT: Ready, Probes: Successful" @@ -204,10 +188,11 @@ func NodeCPUHog(clients clients.ClientSets) { log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { log.Errorf("Unable to Update the Chaos Result, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } - // generating the event in chaosresult to marked the verdict as pass/fail + // generating the event in chaosresult to mark the verdict as pass/fail msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict) reason := types.PassVerdict eventType := "Normal" diff --git a/experiments/generic/node-drain/experiment/node-drain.go b/experiments/generic/node-drain/experiment/node-drain.go index 201caba8f..579504285 100644 --- a/experiments/generic/node-drain/experiment/node-drain.go +++ b/experiments/generic/node-drain/experiment/node-drain.go @@ -48,15 +48,14 @@ func NodeDrain(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err 
!= nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of node-drain experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } // Set the chaos result uid result.SetResultUID(&resultDetails, clients, &chaosDetails) - // generating the event in chaosresult to marked the verdict as awaited + // generating the event in chaosresult to mark the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") @@ -76,8 +75,7 @@ func NodeDrain(clients clients.ClientSets) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -86,8 +84,7 @@ func NodeDrain(clients clients.ClientSets) { log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)") if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { log.Errorf("Auxiliary Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -96,10 +93,9 @@ func NodeDrain(clients clients.ClientSets) { log.Info("[Status]: Getting the status of target nodes") if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { log.Errorf("Target nodes are not in the ready state, err: %v", err) - failStep := "[pre-chaos]: Failed to verify the status of nodes, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -113,11 +109,10 @@ func NodeDrain(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := "NUT: Ready, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, 
&eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "NUT: Ready, Probes: Successful" @@ -127,32 +122,23 @@ func NodeDrain(clients clients.ClientSets) { events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - // Including the litmus lib for node-drain - switch experimentsDetails.ChaosLib { - case "litmus": - if err := litmusLIB.PrepareNodeDrain(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + if err := litmusLIB.PrepareNodeDrain(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("Chaos injection failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase //POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -161,8 +147,7 @@ func NodeDrain(clients clients.ClientSets) { log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)") if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { log.Errorf("Auxiliary Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the Auxiliary Applications are running, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -184,11 +169,10 @@ func NodeDrain(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := "NUT: Ready, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) 
return } msg = "NUT: Ready, Probes: Successful" @@ -203,10 +187,11 @@ func NodeDrain(clients clients.ClientSets) { log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { log.Errorf("Unable to Update the Chaos Result, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } - // generating the event in chaosresult to marked the verdict as pass/fail + // generating the event in chaosresult to mark the verdict as pass/fail msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict) reason := types.PassVerdict eventType := "Normal" diff --git a/experiments/generic/node-io-stress/experiment/node-io-stress.go b/experiments/generic/node-io-stress/experiment/node-io-stress.go index 41d41dd26..05723ec4b 100644 --- a/experiments/generic/node-io-stress/experiment/node-io-stress.go +++ b/experiments/generic/node-io-stress/experiment/node-io-stress.go @@ -48,15 +48,14 @@ func NodeIOStress(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of node-io-stress experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } // Set the chaos result uid result.SetResultUID(&resultDetails, clients, &chaosDetails) - // generating the event in chaosresult to marked the verdict as awaited + // generating the event in chaosresult to mark the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") @@ -79,8 +78,7 @@ func NodeIOStress(clients clients.ClientSets) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -89,8 +87,7 @@ func NodeIOStress(clients clients.ClientSets) { log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)") if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { log.Errorf("Auxiliary Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -99,10 +96,9 @@ func 
NodeIOStress(clients clients.ClientSets) { log.Info("[Status]: Getting the status of target nodes") if err := status.CheckNodeStatus(experimentsDetails.TargetNodes, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { log.Errorf("Target nodes are not in the ready state, err: %v", err) - failStep := "[pre-chaos]: Failed to verify the status of nodes, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -116,11 +112,10 @@ func NodeIOStress(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := "NUT: Ready, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "NUT: Ready, Probes: Successful" @@ -130,32 +125,23 @@ func NodeIOStress(clients clients.ClientSets) { events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - // Including the litmus lib for node-io-stress - switch experimentsDetails.ChaosLib { - case "litmus": - if err := litmusLIB.PrepareNodeIOStress(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("[Error]: node io stress failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + if err := litmusLIB.PrepareNodeIOStress(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("[Error]: node io stress failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase //POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Infof("Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -164,8 +150,7 @@ func 
NodeIOStress(clients clients.ClientSets) { log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)") if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { log.Errorf("Auxiliary Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the Auxiliary Applications are running, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -187,11 +172,10 @@ func NodeIOStress(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := "NUT: Ready, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "NUT: Ready, Probes: Successful" @@ -206,10 +190,11 @@ func NodeIOStress(clients clients.ClientSets) { log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { log.Errorf("Unable to Update the Chaos Result, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } - // generating the event in chaosresult to marked the verdict as pass/fail + // generating the event in chaosresult to mark the verdict as pass/fail msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict) reason := types.PassVerdict eventType := "Normal" diff --git a/experiments/generic/node-memory-hog/experiment/node-memory-hog.go b/experiments/generic/node-memory-hog/experiment/node-memory-hog.go index d31c68da3..bc9d28033 100644 --- a/experiments/generic/node-memory-hog/experiment/node-memory-hog.go +++ b/experiments/generic/node-memory-hog/experiment/node-memory-hog.go @@ -48,15 +48,14 @@ func NodeMemoryHog(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of node-memory-hog experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } // Set the chaos result uid result.SetResultUID(&resultDetails, clients, &chaosDetails) - // generating the event in chaosresult to marked the verdict as awaited + // generating the event in chaosresult to mark the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", 
&resultDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") @@ -78,8 +77,7 @@ func NodeMemoryHog(clients clients.ClientSets) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -88,8 +86,7 @@ func NodeMemoryHog(clients clients.ClientSets) { log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)") if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { log.Errorf("Auxiliary Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -98,10 +95,9 @@ func NodeMemoryHog(clients clients.ClientSets) { log.Info("[Status]: Getting the status of target nodes") if err := status.CheckNodeStatus(experimentsDetails.TargetNodes, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { log.Errorf("Target nodes are not in the ready state, err: %v", err) - failStep := "[pre-chaos]: Failed to verify the status of nodes, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -115,11 +111,10 @@ func NodeMemoryHog(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := "NUT: Ready, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "NUT: Ready, Probes: Successful" @@ -129,32 +124,23 @@ func NodeMemoryHog(clients clients.ClientSets) { events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - // Including the litmus lib for node-memory-hog - switch experimentsDetails.ChaosLib { - case "litmus": - if err := litmusLIB.PrepareNodeMemoryHog(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("[Error]: node memory hog failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, 
&eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + if err := litmusLIB.PrepareNodeMemoryHog(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("[Error]: node memory hog failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase //POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Infof("Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -163,8 +149,7 @@ func NodeMemoryHog(clients clients.ClientSets) { log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)") if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { log.Errorf("Auxiliary Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the Auxiliary Applications are running, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -186,11 +171,10 @@ func NodeMemoryHog(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := "NUT: Ready, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "NUT: Ready, Probes: Successful" @@ -205,10 +189,11 @@ func NodeMemoryHog(clients clients.ClientSets) { log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { log.Errorf("Unable to Update the Chaos Result, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } - // generating the event in chaosresult to marked the verdict as pass/fail + // generating the event in chaosresult to mark the verdict as pass/fail msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + 
string(resultDetails.Verdict) reason := types.PassVerdict eventType := "Normal" diff --git a/experiments/generic/node-restart/experiment/node-restart.go b/experiments/generic/node-restart/experiment/node-restart.go index a53aa2630..6450d5c18 100644 --- a/experiments/generic/node-restart/experiment/node-restart.go +++ b/experiments/generic/node-restart/experiment/node-restart.go @@ -48,14 +48,18 @@ func NodeRestart(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of node-restart experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } // Set the chaos result uid result.SetResultUID(&resultDetails, clients, &chaosDetails) + // generating the event in chaosresult to mark the verdict as awaited + msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" + types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) + events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") + //DISPLAY THE APP INFORMATION log.InfoWithValues("The application information is as follows", logrus.Fields{ "Node Label": experimentsDetails.NodeLabel, @@ -71,8 +75,7 @@ func NodeRestart(clients clients.ClientSets) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -81,8 +84,7 @@ func NodeRestart(clients clients.ClientSets) { log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)") if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { log.Errorf("Auxiliary Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -91,10 +93,9 @@ func NodeRestart(clients clients.ClientSets) { log.Info("[Status]: Getting the status of target nodes") if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { log.Errorf("Target nodes are not in the ready state, err: %v", err) - failStep := "[pre-chaos]: Failed to verify the status of nodes, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, 
&resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -108,11 +109,10 @@ func NodeRestart(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := "NUT: Ready, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "NUT: Ready, Probes: Successful" @@ -122,32 +122,23 @@ func NodeRestart(clients clients.ClientSets) { events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - // Including the litmus lib for node-restart - switch experimentsDetails.ChaosLib { - case "litmus": - if err := litmusLIB.PrepareNodeRestart(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("[Error]: Node restart failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + if err := litmusLIB.PrepareNodeRestart(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("[Error]: Node restart failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase //POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Infof("Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -156,8 +147,7 @@ func NodeRestart(clients clients.ClientSets) { log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)") if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { log.Errorf("Auxiliary Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the Auxiliary Applications are running, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, 
&eventsDetails) return } } @@ -179,11 +169,10 @@ func NodeRestart(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := "NUT: Ready, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "NUT: Ready, Probes: Successful" @@ -198,6 +187,7 @@ func NodeRestart(clients clients.ClientSets) { log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { log.Errorf("Unable to Update the Chaos Result, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } if experimentsDetails.EngineName != "" { @@ -206,7 +196,7 @@ func NodeRestart(clients clients.ClientSets) { events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed" + msg = experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed" types.SetResultEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &resultDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") } diff --git a/experiments/generic/node-taint/experiment/node-taint.go b/experiments/generic/node-taint/experiment/node-taint.go index f415630b9..bdcbe65dd 100644 --- a/experiments/generic/node-taint/experiment/node-taint.go +++ b/experiments/generic/node-taint/experiment/node-taint.go @@ -48,15 +48,14 @@ func NodeTaint(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of node-taint experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } // Set the chaos result uid result.SetResultUID(&resultDetails, clients, &chaosDetails) - // generating the event in chaosresult to marked the verdict as awaited + // generating the event in chaosresult to mark the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") @@ -77,8 +76,7 @@ func NodeTaint(clients clients.ClientSets) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify 
that the AUT (Application Under Test) is in running state, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -87,8 +85,7 @@ func NodeTaint(clients clients.ClientSets) { log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)") if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { log.Errorf("Auxiliary Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -97,10 +94,9 @@ func NodeTaint(clients clients.ClientSets) { log.Info("[Status]: Getting the status of target nodes") if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { log.Errorf("Target nodes are not in the ready state, err: %v", err) - failStep := "[pre-chaos]: Failed to verify the status of nodes, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -114,11 +110,10 @@ func NodeTaint(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := "NUT: Ready, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "NUT: Ready, Probes: Successful" @@ -128,32 +123,23 @@ func NodeTaint(clients clients.ClientSets) { events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - // Including the litmus lib for node-taint - switch experimentsDetails.ChaosLib { - case "litmus": - if err := litmusLIB.PrepareNodeTaint(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + if err := litmusLIB.PrepareNodeTaint(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("Chaos injection 
failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase //POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -162,8 +148,7 @@ func NodeTaint(clients clients.ClientSets) { log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)") if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { log.Errorf("Auxiliary Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the Auxiliary Applications are running, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -185,11 +170,10 @@ func NodeTaint(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := "NUT: Ready, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "NUT: Ready, Probes: Successful" @@ -204,10 +188,11 @@ func NodeTaint(clients clients.ClientSets) { log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { log.Errorf("Unable to Update the Chaos Result, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } - // generating the event in chaosresult to marked the verdict as pass/fail + // generating the event in chaosresult to mark the verdict as pass/fail msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict) reason := types.PassVerdict eventType := "Normal" diff --git a/experiments/generic/pod-autoscaler/experiment/pod-autoscaler.go b/experiments/generic/pod-autoscaler/experiment/pod-autoscaler.go index ce04294a7..4c618df66 100644 --- a/experiments/generic/pod-autoscaler/experiment/pod-autoscaler.go +++ b/experiments/generic/pod-autoscaler/experiment/pod-autoscaler.go @@ -48,8 +48,7 @@ func PodAutoscaler(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the 
chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of pod-autoscaler experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -78,10 +77,9 @@ func PodAutoscaler(clients clients.ClientSets) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - types.SetResultAfterCompletion(&resultDetails, "Fail", "Completed", failStep) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -95,11 +93,10 @@ func PodAutoscaler(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -109,34 +106,25 @@ func PodAutoscaler(clients clients.ClientSets) { events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - // Including the litmus lib for pod-autoscaler - switch experimentsDetails.ChaosLib { - case "litmus": - if err := litmusLIB.PreparePodAutoscaler(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + if err := litmusLIB.PreparePodAutoscaler(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("Chaos injection failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase 
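The same refactor repeats in every experiment touched above: the hand-assembled failStep string is dropped, the raw error is passed straight to result.RecordAfterFailure, and chaosDetails.Phase now brackets the injection step. A minimal sketch of that flow, where PrepareChaos stands in for the per-experiment chaoslib entrypoint (PreparePodAutoscaler, PrepareNodeRestart, and so on):

    // Sketch only: the common failure-recording pattern introduced by this patch.
    chaosDetails.Phase = types.ChaosInjectPhase
    if err := litmusLIB.PrepareChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
        log.Errorf("Chaos injection failed, err: %v", err)
        // RecordAfterFailure now takes the error directly instead of a
        // pre-formatted "[chaos]: ..." failStep string.
        result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
        return
    }
    resultDetails.Verdict = v1alpha1.ResultVerdictPassed
    chaosDetails.Phase = types.PostChaosPhase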
//POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - types.SetResultAfterCompletion(&resultDetails, "Fail", "Completed", failStep) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT") return } @@ -150,11 +138,10 @@ func PodAutoscaler(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -169,6 +156,7 @@ func PodAutoscaler(clients clients.ClientSets) { log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { log.Errorf("Unable to Update the Chaos Result, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } diff --git a/experiments/generic/pod-cpu-hog-exec/experiment/pod-cpu-hog-exec.go b/experiments/generic/pod-cpu-hog-exec/experiment/pod-cpu-hog-exec.go index ad59a2982..ae7e317b6 100644 --- a/experiments/generic/pod-cpu-hog-exec/experiment/pod-cpu-hog-exec.go +++ b/experiments/generic/pod-cpu-hog-exec/experiment/pod-cpu-hog-exec.go @@ -48,8 +48,7 @@ func PodCPUHogExec(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of pod-cpu-hog-exec experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -63,9 +62,10 @@ func PodCPUHogExec(clients clients.ClientSets) { //DISPLAY THE APP INFORMATION log.InfoWithValues("The application information is as follows", logrus.Fields{ - "Targets": chaosDetails.Targets, - "Chaos Duration": experimentsDetails.ChaosDuration, - "CPU Cores": experimentsDetails.CPUcores, + "Targets": common.GetAppDetailsForLogging(chaosDetails.AppDetail), + "Target Container": 
experimentsDetails.TargetContainer, + "Chaos Duration": experimentsDetails.ChaosDuration, + "CPU Cores": experimentsDetails.CPUcores, }) // Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result @@ -76,10 +76,9 @@ func PodCPUHogExec(clients clients.ClientSets) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -93,11 +92,10 @@ func PodCPUHogExec(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -107,34 +105,25 @@ func PodCPUHogExec(clients clients.ClientSets) { events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - // Including the litmus lib for pod-cpu-hog-exec - switch experimentsDetails.ChaosLib { - case "litmus": - if err := litmusLIB.PrepareCPUExecStress(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("[Error]: CPU hog failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + if err := litmusLIB.PrepareCPUExecStress(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("[Error]: CPU hog failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase //POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { 
log.Infof("Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -147,11 +136,10 @@ func PodCPUHogExec(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -166,10 +154,11 @@ func PodCPUHogExec(clients clients.ClientSets) { log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { log.Errorf("Unable to Update the Chaos Result, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } - // generating the event in chaosresult to marked the verdict as pass/fail + // generating the event in chaosresult to mark the verdict as pass/fail msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict) reason := types.PassVerdict eventType := "Normal" diff --git a/experiments/generic/pod-cpu-hog/experiment/pod-cpu-hog.go b/experiments/generic/pod-cpu-hog/experiment/pod-cpu-hog.go index 16a0bea47..c06d3b684 100644 --- a/experiments/generic/pod-cpu-hog/experiment/pod-cpu-hog.go +++ b/experiments/generic/pod-cpu-hog/experiment/pod-cpu-hog.go @@ -5,7 +5,6 @@ import ( "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/stress-chaos/lib" - pumbaLIB "github.com/litmuschaos/litmus-go/chaoslib/pumba/cpu-chaos/lib" clients "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/stress-chaos/environment" @@ -49,8 +48,7 @@ func PodCPUHog(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of pod-cpu-hog experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ 
-64,7 +62,10 @@ func PodCPUHog(clients clients.ClientSets) { //DISPLAY THE APP INFORMATION log.InfoWithValues("The application information is as follows", logrus.Fields{ - "Targets": chaosDetails.AppDetail, + "Targets": common.GetAppDetailsForLogging(chaosDetails.AppDetail), + "Target Container": experimentsDetails.TargetContainer, + "Chaos Duration": experimentsDetails.ChaosDuration, + "Container Runtime": experimentsDetails.ContainerRuntime, }) // Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result @@ -75,10 +76,9 @@ func PodCPUHog(clients clients.ClientSets) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -92,11 +92,10 @@ func PodCPUHog(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -106,41 +105,25 @@ func PodCPUHog(clients clients.ClientSets) { events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - // Including the litmus lib for pod-cpu-hog - switch experimentsDetails.ChaosLib { - case "litmus": - if err := litmusLIB.PrepareAndInjectStressChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("[Error]: CPU hog failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - case "pumba": - if err := pumbaLIB.PreparePodCPUHog(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("[Error]: CPU hog failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match found for specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + if err := 
litmusLIB.PrepareAndInjectStressChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("[Error]: CPU hog failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase //POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Infof("Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -153,11 +136,10 @@ func PodCPUHog(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -172,10 +154,11 @@ func PodCPUHog(clients clients.ClientSets) { log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { log.Errorf("Unable to Update the Chaos Result, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } - // generating the event in chaosresult to marked the verdict as pass/fail + // generating the event in chaosresult to mark the verdict as pass/fail msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict) reason := types.PassVerdict eventType := "Normal" diff --git a/experiments/generic/pod-delete/experiment/pod-delete.go b/experiments/generic/pod-delete/experiment/pod-delete.go index 43c0eb56b..5d6f77a7b 100644 --- a/experiments/generic/pod-delete/experiment/pod-delete.go +++ b/experiments/generic/pod-delete/experiment/pod-delete.go @@ -1,11 +1,8 @@ package experiment import ( - "os" - "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/pod-delete/lib" - powerfulseal "github.com/litmuschaos/litmus-go/chaoslib/powerfulseal/pod-delete/lib" "github.com/litmuschaos/litmus-go/pkg/clients" 
"github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/pod-delete/environment" @@ -17,6 +14,7 @@ import ( "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" "github.com/sirupsen/logrus" + "os" ) // PodDelete inject the pod-delete chaos @@ -48,24 +46,28 @@ func PodDelete(clients clients.ClientSets) { //Updating the chaos result in the beginning of experiment log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { - log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of pod-delete experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("Unable to create the chaosresult, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } // Set the chaos result uid - result.SetResultUID(&resultDetails, clients, &chaosDetails) + if err := result.SetResultUID(&resultDetails, clients, &chaosDetails); err != nil { + log.Errorf("Unable to set the result uid, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) + return + } // generating the event in chaosresult to marked the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") + if err := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); err != nil { + log.Errorf("failed to create %v event inside chaosresult", types.AwaitedVerdict) + } //DISPLAY THE APP INFORMATION log.InfoWithValues("The application information is as follows", logrus.Fields{ - "Namespace": experimentsDetails.AppNS, - "Label": experimentsDetails.AppLabel, + "Targets": common.GetAppDetailsForLogging(chaosDetails.AppDetail), "Chaos Duration": experimentsDetails.ChaosDuration, }) @@ -77,10 +79,11 @@ func PodDelete(clients clients.ClientSets) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("failed to create %v event inside chaosengine", types.PreChaosCheck) + } + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -94,11 +97,12 @@ func PodDelete(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg = 
common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("failed to create %v event inside chaosengine", types.PreChaosCheck) + } + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -108,41 +112,25 @@ func PodDelete(clients clients.ClientSets) { events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - // Including the litmus lib for pod-delete - switch experimentsDetails.ChaosLib { - case "litmus": - if err := litmusLIB.PreparePodDelete(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - case "powerfulseal": - if err := powerfulseal.PreparePodDelete(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match found for specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + if err := litmusLIB.PreparePodDelete(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("Chaos injection failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase //POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -155,11 +143,12 @@ func PodDelete(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", 
err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("failed to create %v event inside chaosengine", types.PostChaosCheck) + } + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -173,11 +162,12 @@ func PodDelete(clients clients.ClientSets) { //Updating the chaosResult in the end of experiment log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { - log.Errorf("Unable to Update the Chaos Result, err: %v", err) + log.Errorf("Unable to update the chaosresult, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } - // generating the event in chaosresult to marked the verdict as pass/fail + // generating the event in chaosresult to mark the verdict as pass/fail msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict) reason := types.PassVerdict eventType := "Normal" diff --git a/experiments/generic/pod-dns-error/experiment/pod-dns-error.go b/experiments/generic/pod-dns-error/experiment/pod-dns-error.go index 442e1367a..0a4e2ee94 100644 --- a/experiments/generic/pod-dns-error/experiment/pod-dns-error.go +++ b/experiments/generic/pod-dns-error/experiment/pod-dns-error.go @@ -48,24 +48,25 @@ func PodDNSError(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of pod-dns-error experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } // Set the chaos result uid result.SetResultUID(&resultDetails, clients, &chaosDetails) - // generating the event in chaosresult to marked the verdict as awaited + // generating the event in chaosresult to mark the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") //DISPLAY THE APP INFORMATION log.InfoWithValues("[Info]: The application information is as follows", logrus.Fields{ - "Targets": chaosDetails.AppDetail, - "Chaos Duration": experimentsDetails.ChaosDuration, - "TargetHostNames": experimentsDetails.TargetHostNames, + "Targets": common.GetAppDetailsForLogging(chaosDetails.AppDetail), + "Target Container": experimentsDetails.TargetContainer, + "Chaos Duration": 
experimentsDetails.ChaosDuration, + "Container Runtime": experimentsDetails.ContainerRuntime, + "TargetHostNames": experimentsDetails.TargetHostNames, }) // Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result @@ -76,10 +77,9 @@ func PodDNSError(clients clients.ClientSets) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -93,11 +93,10 @@ func PodDNSError(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -107,34 +106,25 @@ func PodDNSError(clients clients.ClientSets) { events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - // Including the litmus lib - switch experimentsDetails.ChaosLib { - case "litmus": - if err := litmusLIB.PrepareAndInjectChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + if err := litmusLIB.PrepareAndInjectChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("Chaos injection failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase //POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application 
status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -147,11 +137,10 @@ func PodDNSError(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -166,10 +155,11 @@ func PodDNSError(clients clients.ClientSets) { log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { log.Errorf("Unable to Update the Chaos Result, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } - // generating the event in chaosresult to marked the verdict as pass/fail + // generating the event in chaosresult to mark the verdict as pass/fail msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict) reason := types.PassVerdict eventType := "Normal" diff --git a/experiments/generic/pod-dns-spoof/experiment/pod-dns-spoof.go b/experiments/generic/pod-dns-spoof/experiment/pod-dns-spoof.go index 8a7c19f18..7aa8a27e2 100644 --- a/experiments/generic/pod-dns-spoof/experiment/pod-dns-spoof.go +++ b/experiments/generic/pod-dns-spoof/experiment/pod-dns-spoof.go @@ -49,24 +49,25 @@ func PodDNSSpoof(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of pod-dns-spoof experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } // Set the chaos result uid result.SetResultUID(&resultDetails, clients, &chaosDetails) - // generating the event in chaosresult to marked the verdict as awaited + // generating the event in chaosresult to mark the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) 
events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") //DISPLAY THE APP INFORMATION log.InfoWithValues("[Info]: The application information is as follows", logrus.Fields{ - "Targets": chaosDetails.AppDetail, - "Chaos Duration": experimentsDetails.ChaosDuration, - "Spoof Map": experimentsDetails.SpoofMap, + "Targets": common.GetAppDetailsForLogging(chaosDetails.AppDetail), + "Target Container": experimentsDetails.TargetContainer, + "Chaos Duration": experimentsDetails.ChaosDuration, + "Container Runtime": experimentsDetails.ContainerRuntime, + "Spoof Map": experimentsDetails.SpoofMap, }) // Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result @@ -77,10 +78,9 @@ func PodDNSSpoof(clients clients.ClientSets) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err = status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -95,11 +95,10 @@ func PodDNSSpoof(clients clients.ClientSets) { err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails) if err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -109,34 +108,25 @@ func PodDNSSpoof(clients clients.ClientSets) { events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - // Including the litmus lib - switch experimentsDetails.ChaosLib { - case "litmus": - if err = litmusLIB.PrepareAndInjectChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + if err = litmusLIB.PrepareAndInjectChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("Chaos injection failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } 
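In the pod-delete hunks above, calls to events.GenerateEvents also begin checking the returned error instead of discarding it; the event failure is logged but treated as non-fatal, and the original check failure is still recorded. A condensed sketch of that pattern, using the eventErr name from the diff (err here is whatever pre-chaos check just failed):

    // Sketch only: event-creation errors are logged, then the original failure is recorded.
    types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
    if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
        log.Errorf("failed to create %v event inside chaosengine", types.PreChaosCheck)
    }
    result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
    return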
log.Info("[Confirmation]: chaos has been injected successfully") resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase //POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err = status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -149,11 +139,10 @@ func PodDNSSpoof(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -168,10 +157,11 @@ func PodDNSSpoof(clients clients.ClientSets) { log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { log.Errorf("Unable to Update the Chaos Result, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } - // generating the event in chaosresult to marked the verdict as pass/fail + // generating the event in chaosresult to mark the verdict as pass/fail msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict) reason := types.PassVerdict eventType := "Normal" diff --git a/experiments/generic/pod-fio-stress/experiment/pod-fio-stress.go b/experiments/generic/pod-fio-stress/experiment/pod-fio-stress.go index f7ab10046..b9728797c 100644 --- a/experiments/generic/pod-fio-stress/experiment/pod-fio-stress.go +++ b/experiments/generic/pod-fio-stress/experiment/pod-fio-stress.go @@ -48,23 +48,23 @@ func PodFioStress(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of pod-delete experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } // Set the 
chaos result uid result.SetResultUID(&resultDetails, clients, &chaosDetails) - // generating the event in chaosresult to marked the verdict as awaited + // generating the event in chaosresult to mark the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") //DISPLAY THE APP INFORMATION log.InfoWithValues("[Info]: The application information is as follows", logrus.Fields{ - "Targets": chaosDetails.AppDetail, - "Chaos Duration": experimentsDetails.ChaosDuration, + "Targets": common.GetAppDetailsForLogging(chaosDetails.AppDetail), + "Target Container": experimentsDetails.TargetContainer, + "Chaos Duration": experimentsDetails.ChaosDuration, }) // Calling AbortWatcherWithoutExit go routine, it will continuously watch for the abort signal and generate the required events and result @@ -75,10 +75,9 @@ func PodFioStress(clients clients.ClientSets) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -92,11 +91,10 @@ func PodFioStress(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -106,34 +104,25 @@ func PodFioStress(clients clients.ClientSets) { events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - // Including the litmus lib - switch experimentsDetails.ChaosLib { - case "litmus": - if err := litmusLIB.PrepareChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - log.Errorf("Chaos injection failed, err: %v", err) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + if err := 
litmusLIB.PrepareChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) + log.Errorf("Chaos injection failed, err: %v", err) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase //POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -146,11 +135,10 @@ func PodFioStress(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -165,10 +153,11 @@ func PodFioStress(clients clients.ClientSets) { log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { log.Errorf("Unable to Update the Chaos Result, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } - // generating the event in chaosresult to marked the verdict as pass/fail + // generating the event in chaosresult to mark the verdict as pass/fail msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict) reason := types.PassVerdict eventType := "Normal" diff --git a/experiments/generic/pod-http-latency/experiment/pod-http-latency.go b/experiments/generic/pod-http-latency/experiment/pod-http-latency.go index 8e7fafa3d..d48fbcbfc 100644 --- a/experiments/generic/pod-http-latency/experiment/pod-http-latency.go +++ b/experiments/generic/pod-http-latency/experiment/pod-http-latency.go @@ -48,22 +48,22 @@ func PodHttpLatency(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", 
err) - failStep := "[pre-chaos]: Failed to update the chaos result of pod-http-latency experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } // Set the chaos result uid result.SetResultUID(&resultDetails, clients, &chaosDetails) - // generating the event in chaosresult to marked the verdict as awaited + // generating the event in chaosresult to mark the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") //DISPLAY THE APP INFORMATION log.InfoWithValues("The application information is as follows\n", logrus.Fields{ - "Targets": chaosDetails.AppDetail, + "Targets": common.GetAppDetailsForLogging(chaosDetails.AppDetail), + "Target Container": experimentsDetails.TargetContainer, "Chaos Duration": experimentsDetails.ChaosDuration, "Container Runtime": experimentsDetails.ContainerRuntime, }) @@ -76,10 +76,9 @@ func PodHttpLatency(clients clients.ClientSets) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -93,11 +92,10 @@ func PodHttpLatency(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -107,33 +105,25 @@ func PodHttpLatency(clients clients.ClientSets) { events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - switch { - case experimentsDetails.ChaosLib == "litmus": - if err := litmusLIB.PodHttpLatencyChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified 
lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + if err := litmusLIB.PodHttpLatencyChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("Chaos injection failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase //POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Infof("Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -146,11 +136,10 @@ func PodHttpLatency(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -165,10 +154,11 @@ func PodHttpLatency(clients clients.ClientSets) { log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { log.Errorf("Unable to Update the Chaos Result, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } - // generating the event in chaosresult to marked the verdict as pass/fail + // generating the event in chaosresult to mark the verdict as pass/fail msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict) reason := types.PassVerdict eventType := "Normal" diff --git a/experiments/generic/pod-http-modify-body/experiment/pod-http-modify-body.go b/experiments/generic/pod-http-modify-body/experiment/pod-http-modify-body.go index 34774c1d3..3cd62d631 100644 --- a/experiments/generic/pod-http-modify-body/experiment/pod-http-modify-body.go +++ b/experiments/generic/pod-http-modify-body/experiment/pod-http-modify-body.go @@ -48,22 +48,22 @@ func PodHttpModifyBody(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v 
experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of pod-http-modify-body experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } // Set the chaos result uid result.SetResultUID(&resultDetails, clients, &chaosDetails) - // generating the event in chaosresult to marked the verdict as awaited + // generating the event in chaosresult to mark the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") //DISPLAY THE APP INFORMATION log.InfoWithValues("[Info]: The application information is as follows", logrus.Fields{ - "Targets": chaosDetails.AppDetail, + "Targets": common.GetAppDetailsForLogging(chaosDetails.AppDetail), + "Target Container": experimentsDetails.TargetContainer, "Chaos Duration": experimentsDetails.ChaosDuration, "Container Runtime": experimentsDetails.ContainerRuntime, }) @@ -76,10 +76,9 @@ func PodHttpModifyBody(clients clients.ClientSets) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -93,11 +92,10 @@ func PodHttpModifyBody(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "AUT: Running, Probes: Successful" @@ -107,34 +105,25 @@ func PodHttpModifyBody(clients clients.ClientSets) { events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - // Including the litmus lib - switch experimentsDetails.ChaosLib { - case "litmus": - if err := litmusLIB.PodHttpModifyBodyChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, 
&eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match found for specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + if err := litmusLIB.PodHttpModifyBodyChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("Chaos injection failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase //POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -147,11 +136,10 @@ func PodHttpModifyBody(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "AUT: Running, Probes: Successful" @@ -166,10 +154,11 @@ func PodHttpModifyBody(clients clients.ClientSets) { log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { log.Errorf("Unable to Update the Chaos Result, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } - // generating the event in chaosresult to marked the verdict as pass/fail + // generating the event in chaosresult to mark the verdict as pass/fail msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict) reason := types.PassVerdict eventType := "Normal" diff --git a/experiments/generic/pod-http-modify-header/experiment/pod-http-modify-header.go b/experiments/generic/pod-http-modify-header/experiment/pod-http-modify-header.go index ebbb83128..47f2db10a 100644 --- a/experiments/generic/pod-http-modify-header/experiment/pod-http-modify-header.go +++ b/experiments/generic/pod-http-modify-header/experiment/pod-http-modify-header.go @@ -48,22 +48,22 @@ func PodHttpModifyHeader(clients 
clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of pod-http-modify-header experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } // Set the chaos result uid result.SetResultUID(&resultDetails, clients, &chaosDetails) - // generating the event in chaosresult to marked the verdict as awaited + // generating the event in chaosresult to mark the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") //DISPLAY THE APP INFORMATION log.InfoWithValues("The application information is as follows\n", logrus.Fields{ - "Targets": chaosDetails.AppDetail, + "Targets": common.GetAppDetailsForLogging(chaosDetails.AppDetail), + "Target Container": experimentsDetails.TargetContainer, "Chaos Duration": experimentsDetails.ChaosDuration, "Container Runtime": experimentsDetails.ContainerRuntime, }) @@ -76,10 +76,9 @@ func PodHttpModifyHeader(clients clients.ClientSets) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -93,11 +92,10 @@ func PodHttpModifyHeader(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -107,33 +105,25 @@ func PodHttpModifyHeader(clients clients.ClientSets) { events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - switch { - case experimentsDetails.ChaosLib == "litmus": - if err := litmusLIB.PodHttpModifyHeaderChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - 
failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + if err := litmusLIB.PodHttpModifyHeaderChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("Chaos injection failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase //POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Infof("Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -146,11 +136,10 @@ func PodHttpModifyHeader(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -165,10 +154,11 @@ func PodHttpModifyHeader(clients clients.ClientSets) { log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { log.Errorf("Unable to update the Chaos Result, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } - // generating the event in chaosresult to marked the verdict as pass/fail + // generating the event in chaosresult to mark the verdict as pass/fail msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict) reason := types.PassVerdict eventType := "Normal" diff --git a/experiments/generic/pod-http-reset-peer/experiment/pod-http-reset-peer.go b/experiments/generic/pod-http-reset-peer/experiment/pod-http-reset-peer.go index 
b6d15e658..c394f9e54 100644 --- a/experiments/generic/pod-http-reset-peer/experiment/pod-http-reset-peer.go +++ b/experiments/generic/pod-http-reset-peer/experiment/pod-http-reset-peer.go @@ -48,23 +48,24 @@ func PodHttpResetPeer(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of pod-http-reset-peer experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } // Set the chaos result uid result.SetResultUID(&resultDetails, clients, &chaosDetails) - // generating the event in chaosresult to marked the verdict as awaited + // generating the event in chaosresult to mark the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") //DISPLAY THE APP INFORMATION log.InfoWithValues("[Info]: The application information is as follows", logrus.Fields{ - "Targets": chaosDetails.AppDetail, - "Chaos Duration": experimentsDetails.ChaosDuration, + "Targets": common.GetAppDetailsForLogging(chaosDetails.AppDetail), + "Target Container": experimentsDetails.TargetContainer, + "Chaos Duration": experimentsDetails.ChaosDuration, + "Container Runtime": experimentsDetails.ContainerRuntime, }) // Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result @@ -75,10 +76,9 @@ func PodHttpResetPeer(clients clients.ClientSets) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -92,11 +92,10 @@ func PodHttpResetPeer(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "AUT: Running, Probes: Successful" @@ -106,34 +105,25 @@ func PodHttpResetPeer(clients clients.ClientSets) { 
events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - // Including the litmus lib - switch experimentsDetails.ChaosLib { - case "litmus": - if err := litmusLIB.PodHttpResetPeerChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match found for specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + if err := litmusLIB.PodHttpResetPeerChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("Chaos injection failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase //POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -146,11 +136,10 @@ func PodHttpResetPeer(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "AUT: Running, Probes: Successful" @@ -165,10 +154,11 @@ func PodHttpResetPeer(clients clients.ClientSets) { log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { log.Errorf("Unable to Update the Chaos Result, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } - // generating the event in chaosresult to marked the verdict as pass/fail + // generating the event in chaosresult to mark the verdict as pass/fail msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + 
string(resultDetails.Verdict) reason := types.PassVerdict eventType := "Normal" diff --git a/experiments/generic/pod-http-status-code/experiment/pod-http-status-code.go b/experiments/generic/pod-http-status-code/experiment/pod-http-status-code.go index 6622e725a..9eba3f09c 100644 --- a/experiments/generic/pod-http-status-code/experiment/pod-http-status-code.go +++ b/experiments/generic/pod-http-status-code/experiment/pod-http-status-code.go @@ -49,22 +49,22 @@ func PodHttpStatusCode(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of pod-http-status-code experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } // Set the chaos result uid result.SetResultUID(&resultDetails, clients, &chaosDetails) - // generating the event in chaosresult to marked the verdict as awaited + // generating the event in chaosresult to mark the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") //DISPLAY THE APP INFORMATION log.InfoWithValues("[Info]: The application information is as follows", logrus.Fields{ - "Targets": chaosDetails.AppDetail, + "Targets": common.GetAppDetailsForLogging(chaosDetails.AppDetail), + "Target Container": experimentsDetails.TargetContainer, "Chaos Duration": experimentsDetails.ChaosDuration, "Container Runtime": experimentsDetails.ContainerRuntime, }) @@ -75,8 +75,7 @@ func PodHttpStatusCode(clients clients.ClientSets) { // PRE-CHAOS check to verify support for provided status code value if experimentsDetails.StatusCode, err = litmusLIB.GetStatusCode(experimentsDetails.StatusCode); err != nil { log.Errorf("[Pre-Chaos]: Failed to verify status code support, err: %v", err) - failStep := "[pre-chaos]: Status code not supported. 
Provide a valid status code" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -85,10 +84,9 @@ func PodHttpStatusCode(clients clients.ClientSets) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -102,11 +100,10 @@ func PodHttpStatusCode(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "AUT: Running, Probes: Successful" @@ -116,34 +113,25 @@ func PodHttpStatusCode(clients clients.ClientSets) { events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - // Including the litmus lib - switch experimentsDetails.ChaosLib { - case "litmus": - if err := litmusLIB.PodHttpStatusCodeChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match found for specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + if err := litmusLIB.PodHttpStatusCodeChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("Chaos injection failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase //POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() 
types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -156,11 +144,10 @@ func PodHttpStatusCode(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "AUT: Running, Probes: Successful" @@ -175,10 +162,11 @@ func PodHttpStatusCode(clients clients.ClientSets) { log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { log.Errorf("Unable to Update the Chaos Result, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } - // generating the event in chaosresult to marked the verdict as pass/fail + // generating the event in chaosresult to mark the verdict as pass/fail msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict) reason := types.PassVerdict eventType := "Normal" diff --git a/experiments/generic/pod-io-stress/experiment/pod-io-stress.go b/experiments/generic/pod-io-stress/experiment/pod-io-stress.go index efcdece2c..93555a5cb 100644 --- a/experiments/generic/pod-io-stress/experiment/pod-io-stress.go +++ b/experiments/generic/pod-io-stress/experiment/pod-io-stress.go @@ -5,7 +5,6 @@ import ( "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/stress-chaos/lib" - pumbaLIB "github.com/litmuschaos/litmus-go/chaoslib/pumba/pod-io-stress/lib" clients "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/stress-chaos/environment" @@ -49,23 +48,24 @@ func PodIOStress(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of " + experimentsDetails.ExperimentName + " experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } // Set the chaos result uid result.SetResultUID(&resultDetails, clients, &chaosDetails) - // generating the event in chaosresult to marked the verdict as awaited + // generating the event in chaosresult to mark the verdict 
as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") //DISPLAY THE APP INFORMATION log.InfoWithValues("The application information is as follows", logrus.Fields{ - "Targets": chaosDetails.AppDetail, - "Chaos Duration": experimentsDetails.ChaosDuration, + "Targets": common.GetAppDetailsForLogging(chaosDetails.AppDetail), + "Target Container": experimentsDetails.TargetContainer, + "Chaos Duration": experimentsDetails.ChaosDuration, + "Container Runtime": experimentsDetails.ContainerRuntime, }) // Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result @@ -76,10 +76,9 @@ func PodIOStress(clients clients.ClientSets) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -93,11 +92,10 @@ func PodIOStress(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -107,41 +105,25 @@ func PodIOStress(clients clients.ClientSets) { events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - // Including the litmus lib for pod-io-stress - switch experimentsDetails.ChaosLib { - case "litmus": - if err := litmusLIB.PrepareAndInjectStressChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("[Error]: Pod IO Stress failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - case "pumba": - if err := pumbaLIB.PreparePodIOStress(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("[Error]: pod io stress chaos failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please 
Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + if err := litmusLIB.PrepareAndInjectStressChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("[Error]: Pod IO Stress failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase //POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Infof("Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -154,11 +136,10 @@ func PodIOStress(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -173,10 +154,11 @@ func PodIOStress(clients clients.ClientSets) { log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { log.Errorf("Unable to Update the Chaos Result, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } - // generating the event in chaosresult to marked the verdict as pass/fail + // generating the event in chaosresult to mark the verdict as pass/fail msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict) reason := types.PassVerdict eventType := "Normal" diff --git a/experiments/generic/pod-memory-hog-exec/experiment/pod-memory-hog-exec.go b/experiments/generic/pod-memory-hog-exec/experiment/pod-memory-hog-exec.go index b2ad7b4aa..f602ac7fd 100644 --- a/experiments/generic/pod-memory-hog-exec/experiment/pod-memory-hog-exec.go +++ b/experiments/generic/pod-memory-hog-exec/experiment/pod-memory-hog-exec.go @@ -48,22 +48,22 @@ func 
PodMemoryHogExec(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of pod-memory-hog-exec experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } // Set the chaos result uid result.SetResultUID(&resultDetails, clients, &chaosDetails) - // generating the event in chaosresult to marked the verdict as awaited + // generating the event in chaosresult to mark the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") //DISPLAY THE APP INFORMATION log.InfoWithValues("The application information is as follows", logrus.Fields{ - "Targets": chaosDetails.AppDetail, + "Targets": common.GetAppDetailsForLogging(chaosDetails.AppDetail), + "Target Container": experimentsDetails.TargetContainer, "Chaos Duration": experimentsDetails.ChaosDuration, "Memory Consumption": experimentsDetails.MemoryConsumption, }) @@ -76,10 +76,9 @@ func PodMemoryHogExec(clients clients.ClientSets) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -93,11 +92,10 @@ func PodMemoryHogExec(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -107,34 +105,25 @@ func PodMemoryHogExec(clients clients.ClientSets) { events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - // Including the litmus lib for pod-memory-hog-exec - switch experimentsDetails.ChaosLib { - case "litmus": - if err := litmusLIB.PrepareMemoryExecStress(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != 
nil { - log.Errorf("[Error]: pod memory hog failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + if err := litmusLIB.PrepareMemoryExecStress(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("[Error]: pod memory hog failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase //POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Infof("Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -147,11 +136,10 @@ func PodMemoryHogExec(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -166,10 +154,11 @@ func PodMemoryHogExec(clients clients.ClientSets) { log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { log.Errorf("Unable to Update the Chaos Result, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } - // generating the event in chaosresult to marked the verdict as pass/fail + // generating the event in chaosresult to mark the verdict as pass/fail msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict) reason := types.PassVerdict eventType := "Normal" diff --git a/experiments/generic/pod-memory-hog/experiment/pod-memory-hog.go 
b/experiments/generic/pod-memory-hog/experiment/pod-memory-hog.go index 4c758e8df..a7f5a0899 100644 --- a/experiments/generic/pod-memory-hog/experiment/pod-memory-hog.go +++ b/experiments/generic/pod-memory-hog/experiment/pod-memory-hog.go @@ -5,7 +5,6 @@ import ( "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/stress-chaos/lib" - pumbaLIB "github.com/litmuschaos/litmus-go/chaoslib/pumba/memory-chaos/lib" clients "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/stress-chaos/environment" @@ -49,22 +48,22 @@ func PodMemoryHog(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of pod-memory-hog experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } // Set the chaos result uid result.SetResultUID(&resultDetails, clients, &chaosDetails) - // generating the event in chaosresult to marked the verdict as awaited + // generating the event in chaosresult to mark the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") //DISPLAY THE APP INFORMATION log.InfoWithValues("The application information is as follows", logrus.Fields{ - "Targets": chaosDetails.AppDetail, + "Targets": common.GetAppDetailsForLogging(chaosDetails.AppDetail), + "Target Container": experimentsDetails.TargetContainer, "Chaos Duration": experimentsDetails.ChaosDuration, "Container Runtime": experimentsDetails.ContainerRuntime, }) @@ -77,10 +76,9 @@ func PodMemoryHog(clients clients.ClientSets) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed,, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -94,11 +92,10 @@ func PodMemoryHog(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - 
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -108,41 +105,25 @@ func PodMemoryHog(clients clients.ClientSets) { events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - // Including the litmus lib for pod-memory-hog - switch experimentsDetails.ChaosLib { - case "litmus": - if err := litmusLIB.PrepareAndInjectStressChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("[Error]: pod memory hog failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - case "pumba": - if err := pumbaLIB.PreparePodMemoryHog(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("[Error]: Memory hog failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + if err := litmusLIB.PrepareAndInjectStressChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("[Error]: pod memory hog failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase //POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Infof("Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -155,11 +136,10 @@ func PodMemoryHog(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, 
failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -174,6 +154,7 @@ func PodMemoryHog(clients clients.ClientSets) { log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { log.Errorf("Unable to Update the Chaos Result, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } diff --git a/experiments/generic/pod-network-corruption/experiment/pod-network-corruption.go b/experiments/generic/pod-network-corruption/experiment/pod-network-corruption.go index e78996c8f..00d879b07 100644 --- a/experiments/generic/pod-network-corruption/experiment/pod-network-corruption.go +++ b/experiments/generic/pod-network-corruption/experiment/pod-network-corruption.go @@ -5,7 +5,6 @@ import ( "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/network-chaos/lib/corruption" - pumbaLIB "github.com/litmuschaos/litmus-go/chaoslib/pumba/network-chaos/lib/corruption" clients "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/environment" @@ -49,26 +48,25 @@ func PodNetworkCorruption(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of pod-network-corruption experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } // Set the chaos result uid result.SetResultUID(&resultDetails, clients, &chaosDetails) - // generating the event in chaosresult to marked the verdict as awaited + // generating the event in chaosresult to mark the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") //DISPLAY THE APP INFORMATION log.InfoWithValues("The application information is as follows\n", logrus.Fields{ - "Namespace": experimentsDetails.AppNS, - "Label": experimentsDetails.AppLabel, - "Curruption Percentage": experimentsDetails.NetworkPacketCorruptionPercentage, + "Targets": common.GetAppDetailsForLogging(chaosDetails.AppDetail), + "Target Container": experimentsDetails.TargetContainer, "Chaos Duration": experimentsDetails.ChaosDuration, "Container Runtime": experimentsDetails.ContainerRuntime, + "Corruption Percentage": experimentsDetails.NetworkPacketCorruptionPercentage, }) // Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result @@ -79,10 +77,9 @@ func PodNetworkCorruption(clients clients.ClientSets) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := 
status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -96,11 +93,10 @@ func PodNetworkCorruption(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -110,41 +106,25 @@ func PodNetworkCorruption(clients clients.ClientSets) { events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - // Including the pumba lib for pod-network-corruption - switch { - case experimentsDetails.ChaosLib == "pumba" && experimentsDetails.ContainerRuntime == "docker": - if err := pumbaLIB.PodNetworkCorruptionChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - case experimentsDetails.ChaosLib == "litmus": - if err := litmusLIB.PodNetworkCorruptionChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + if err := litmusLIB.PodNetworkCorruptionChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("Chaos injection failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase //POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := 
status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Infof("Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -157,11 +137,10 @@ func PodNetworkCorruption(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -176,10 +155,11 @@ func PodNetworkCorruption(clients clients.ClientSets) { log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { log.Errorf("Unable to Update the Chaos Result, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } - // generating the event in chaosresult to marked the verdict as pass/fail + // generating the event in chaosresult to mark the verdict as pass/fail msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict) reason := types.PassVerdict eventType := "Normal" diff --git a/experiments/generic/pod-network-duplication/experiment/pod-network-duplication.go b/experiments/generic/pod-network-duplication/experiment/pod-network-duplication.go index d9f3bf179..83d126304 100644 --- a/experiments/generic/pod-network-duplication/experiment/pod-network-duplication.go +++ b/experiments/generic/pod-network-duplication/experiment/pod-network-duplication.go @@ -5,7 +5,6 @@ import ( "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/network-chaos/lib/duplication" - pumbaLIB "github.com/litmuschaos/litmus-go/chaoslib/pumba/network-chaos/lib/duplication" clients "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/environment" @@ -49,26 +48,25 @@ func PodNetworkDuplication(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of pod-network-duplication experiment 
(SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } // Set the chaos result uid result.SetResultUID(&resultDetails, clients, &chaosDetails) - // generating the event in chaosresult to marked the verdict as awaited + // generating the event in chaosresult to mark the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") //DISPLAY THE APP INFORMATION log.InfoWithValues("The application information is as follows\n", logrus.Fields{ - "Namespace": experimentsDetails.AppNS, - "Label": experimentsDetails.AppLabel, - "Duplication Percentage": experimentsDetails.NetworkPacketDuplicationPercentage, + "Targets": common.GetAppDetailsForLogging(chaosDetails.AppDetail), + "Target Container": experimentsDetails.TargetContainer, "Chaos Duration": experimentsDetails.ChaosDuration, "Container Runtime": experimentsDetails.ContainerRuntime, + "Duplication Percentage": experimentsDetails.NetworkPacketDuplicationPercentage, }) // Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result @@ -79,10 +77,9 @@ func PodNetworkDuplication(clients clients.ClientSets) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -96,11 +93,10 @@ func PodNetworkDuplication(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -110,41 +106,25 @@ func PodNetworkDuplication(clients clients.ClientSets) { events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - // Including the pumba lib for pod-network-duplication - switch { - case experimentsDetails.ChaosLib == "pumba" && experimentsDetails.ContainerRuntime == "docker": - if err := pumbaLIB.PodNetworkDuplicationChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); 
err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - case experimentsDetails.ChaosLib == "litmus": - if err := litmusLIB.PodNetworkDuplicationChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + if err := litmusLIB.PodNetworkDuplicationChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("Chaos injection failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase //POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Infof("Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -157,11 +137,10 @@ func PodNetworkDuplication(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -176,10 +155,11 @@ func PodNetworkDuplication(clients clients.ClientSets) { log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { log.Errorf("Unable to Update the Chaos Result, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, 
&eventsDetails) return } - // generating the event in chaosresult to marked the verdict as pass/fail + // generating the event in chaosresult to mark the verdict as pass/fail msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict) reason := types.PassVerdict eventType := "Normal" diff --git a/experiments/generic/pod-network-latency/experiment/pod-network-latency.go b/experiments/generic/pod-network-latency/experiment/pod-network-latency.go index 5a29f535d..0f7d7d760 100644 --- a/experiments/generic/pod-network-latency/experiment/pod-network-latency.go +++ b/experiments/generic/pod-network-latency/experiment/pod-network-latency.go @@ -5,7 +5,6 @@ import ( "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/network-chaos/lib/latency" - pumbaLIB "github.com/litmuschaos/litmus-go/chaoslib/pumba/network-chaos/lib/latency" clients "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/environment" @@ -49,26 +48,25 @@ func PodNetworkLatency(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of pod-network-latency experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } // Set the chaos result uid result.SetResultUID(&resultDetails, clients, &chaosDetails) - // generating the event in chaosresult to marked the verdict as awaited + // generating the event in chaosresult to mark the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") //DISPLAY THE APP INFORMATION log.InfoWithValues("The application information is as follows\n", logrus.Fields{ - "Namespace": experimentsDetails.AppNS, - "Label": experimentsDetails.AppLabel, - "Latency": experimentsDetails.NetworkLatency, + "Targets": common.GetAppDetailsForLogging(chaosDetails.AppDetail), + "Target Container": experimentsDetails.TargetContainer, "Chaos Duration": experimentsDetails.ChaosDuration, "Container Runtime": experimentsDetails.ContainerRuntime, + "Latency": experimentsDetails.NetworkLatency, }) // Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result @@ -79,10 +77,9 @@ func PodNetworkLatency(clients clients.ClientSets) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - 
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -95,11 +92,10 @@ func PodNetworkLatency(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -110,41 +106,25 @@ func PodNetworkLatency(clients clients.ClientSets) { events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - // Including the pumba lib for pod-network-latency - switch { - case experimentsDetails.ChaosLib == "pumba" && experimentsDetails.ContainerRuntime == "docker": - if err := pumbaLIB.PodNetworkLatencyChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - case experimentsDetails.ChaosLib == "litmus": - if err := litmusLIB.PodNetworkLatencyChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + if err := litmusLIB.PodNetworkLatencyChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("Chaos injection failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase //POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Infof("Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, 
"ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -157,11 +137,10 @@ func PodNetworkLatency(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -176,10 +155,11 @@ func PodNetworkLatency(clients clients.ClientSets) { log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { log.Errorf("Unable to Update the Chaos Result, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } - // generating the event in chaosresult to marked the verdict as pass/fail + // generating the event in chaosresult to mark the verdict as pass/fail msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict) reason := types.PassVerdict eventType := "Normal" diff --git a/experiments/generic/pod-network-loss/experiment/pod-network-loss.go b/experiments/generic/pod-network-loss/experiment/pod-network-loss.go index bb8b518e7..8fbe0ca9c 100644 --- a/experiments/generic/pod-network-loss/experiment/pod-network-loss.go +++ b/experiments/generic/pod-network-loss/experiment/pod-network-loss.go @@ -5,7 +5,6 @@ import ( "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/network-chaos/lib/loss" - pumbaLIB "github.com/litmuschaos/litmus-go/chaoslib/pumba/network-chaos/lib/loss" clients "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/environment" @@ -44,30 +43,29 @@ func PodNetworkLoss(clients clients.ClientSets) { } } - //Updating the chaos result in the beginningo f experiment + //Updating the chaos result in the beginning of experiment log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of pod-network-loss experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } // Set the chaos result uid result.SetResultUID(&resultDetails, clients, &chaosDetails) - // generating the event in chaosresult to marked the verdict as 
awaited + // generating the event in chaosresult to mark the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") //DISPLAY THE APP INFORMATION log.InfoWithValues("The application information is as follows\n", logrus.Fields{ - "Namespace": experimentsDetails.AppNS, - "Label": experimentsDetails.AppLabel, - "Loss Percentage": experimentsDetails.NetworkPacketLossPercentage, + "Targets": common.GetAppDetailsForLogging(chaosDetails.AppDetail), + "Target Container": experimentsDetails.TargetContainer, "Chaos Duration": experimentsDetails.ChaosDuration, "Container Runtime": experimentsDetails.ContainerRuntime, + "Loss Percentage": experimentsDetails.NetworkPacketLossPercentage, }) // Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result @@ -78,10 +76,9 @@ func PodNetworkLoss(clients clients.ClientSets) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -95,11 +92,10 @@ func PodNetworkLoss(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -109,41 +105,25 @@ func PodNetworkLoss(clients clients.ClientSets) { events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - // Including the pumba lib for pod-network-loss - switch { - case experimentsDetails.ChaosLib == "pumba" && experimentsDetails.ContainerRuntime == "docker": - if err := pumbaLIB.PodNetworkLossChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - case experimentsDetails.ChaosLib == "litmus": - if err := litmusLIB.PodNetworkLossChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - 
log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + if err := litmusLIB.PodNetworkLossChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("Chaos injection failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase //POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Infof("Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -156,11 +136,10 @@ func PodNetworkLoss(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -175,10 +154,11 @@ func PodNetworkLoss(clients clients.ClientSets) { log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { log.Errorf("Unable to Update the Chaos Result, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } - // generating the event in chaosresult to marked the verdict as pass/fail + // generating the event in chaosresult to mark the verdict as pass/fail msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict) reason := types.PassVerdict eventType := "Normal" diff --git a/experiments/generic/pod-network-partition/experiment/pod-network-partition.go 
b/experiments/generic/pod-network-partition/experiment/pod-network-partition.go index b7b7c444c..5fee9f127 100644 --- a/experiments/generic/pod-network-partition/experiment/pod-network-partition.go +++ b/experiments/generic/pod-network-partition/experiment/pod-network-partition.go @@ -48,24 +48,23 @@ func PodNetworkPartition(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of pod-network-partition experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } // Set the chaos result uid result.SetResultUID(&resultDetails, clients, &chaosDetails) - // generating the event in chaosresult to marked the verdict as awaited + // generating the event in chaosresult to mark the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") //DISPLAY THE APP INFORMATION log.InfoWithValues("[Info]: The application information is as follows", logrus.Fields{ - "App Namespace": experimentsDetails.AppNS, - "App Label": experimentsDetails.AppLabel, - "Chaos Duration": experimentsDetails.ChaosDuration, + "Targets": common.GetAppDetailsForLogging(chaosDetails.AppDetail), + "Target Container": experimentsDetails.TargetContainer, + "Chaos Duration": experimentsDetails.ChaosDuration, }) // Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result @@ -76,10 +75,9 @@ func PodNetworkPartition(clients clients.ClientSets) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -93,11 +91,10 @@ func PodNetworkPartition(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, 
&eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -107,34 +104,25 @@ func PodNetworkPartition(clients clients.ClientSets) { events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - // Including the litmus lib - switch experimentsDetails.ChaosLib { - case "litmus": - if err := litmusLIB.PrepareAndInjectChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - failStep := "failed in chaos injection phase" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - log.Errorf("Chaos injection failed, err: %v", err) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + if err := litmusLIB.PrepareAndInjectChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) + log.Errorf("Chaos injection failed, err: %v", err) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase //POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -147,11 +135,10 @@ func PodNetworkPartition(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -166,10 +153,11 @@ func PodNetworkPartition(clients clients.ClientSets) { log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { log.Errorf("Unable to Update the Chaos Result, err: %v", err) + result.RecordAfterFailure(&chaosDetails, 
&resultDetails, err, clients, &eventsDetails) return } - // generating the event in chaosresult to marked the verdict as pass/fail + // generating the event in chaosresult to mark the verdict as pass/fail msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict) reason := types.PassVerdict eventType := "Normal" diff --git a/experiments/kafka/kafka-broker-pod-failure/experiment/kafka-broker-pod-failure.go b/experiments/kafka/kafka-broker-pod-failure/experiment/kafka-broker-pod-failure.go index 60e1fbe64..64b8453bf 100644 --- a/experiments/kafka/kafka-broker-pod-failure/experiment/kafka-broker-pod-failure.go +++ b/experiments/kafka/kafka-broker-pod-failure/experiment/kafka-broker-pod-failure.go @@ -50,8 +50,7 @@ func KafkaBrokerPodFailure(clients clients.ClientSets) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ChaoslibDetail.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of kafka-broker-pod-failure experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -76,10 +75,9 @@ func KafkaBrokerPodFailure(clients clients.ClientSets) { log.Info("[Status]: Verify that the Kafka cluster is healthy(pre-chaos)") if err := kafka.ClusterHealthCheck(&experimentsDetails, clients); err != nil { log.Errorf("Cluster health check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the Kafka cluster is healthy, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -93,11 +91,10 @@ func KafkaBrokerPodFailure(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "AUT: Running, Probes: Successful" @@ -113,8 +110,7 @@ func KafkaBrokerPodFailure(clients clients.ClientSets) { livenessTopicLeader, err := kafka.LivenessStream(&experimentsDetails, clients) if err != nil { log.Errorf("Liveness check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify liveness check, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Info("The Liveness pod gets established") @@ -127,34 +123,27 @@ func KafkaBrokerPodFailure(clients 
clients.ClientSets) { kafka.DisplayKafkaBroker(&experimentsDetails) - // Including the litmus lib for kafka-broker-pod-failure - switch experimentsDetails.ChaoslibDetail.ChaosLib { - case "litmus": - if err := kafkaPodDelete.PreparePodDelete(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed in chaos injection phase, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + + if err := kafkaPodDelete.PreparePodDelete(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("Chaos injection failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase + // POST-CHAOS KAFKA CLUSTER HEALTH CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the Kafka cluster is healthy(post-chaos)") if err := kafka.ClusterHealthCheck(&experimentsDetails, clients); err != nil { log.Errorf("Cluster health check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the Kafka cluster is healthy, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -167,11 +156,10 @@ func KafkaBrokerPodFailure(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") @@ -188,16 +176,14 @@ func KafkaBrokerPodFailure(clients clients.ClientSets) { log.Info("[Status]: Verify that the Kafka liveness pod is running(post-chaos)") if err := status.CheckApplicationStatusesByLabels(experimentsDetails.ChaoslibDetail.AppNS, "name=kafka-liveness-"+experimentsDetails.RunID, experimentsDetails.ChaoslibDetail.Timeout, experimentsDetails.ChaoslibDetail.Delay, clients); err != nil { log.Errorf("Application liveness status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the 
liveness pod is running, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Info("[CleanUp]: Deleting the kafka liveness pod(post-chaos)") if err := kafka.LivenessCleanup(&experimentsDetails, clients); err != nil { log.Errorf("liveness cleanup failed, err: %v", err) - failStep := "[post-chaos]: Failed to perform liveness pod cleanup, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } diff --git a/experiments/kube-aws/ebs-loss-by-id/experiment/ebs-loss-by-id.go b/experiments/kube-aws/ebs-loss-by-id/experiment/ebs-loss-by-id.go index f1f52577f..2701ee442 100644 --- a/experiments/kube-aws/ebs-loss-by-id/experiment/ebs-loss-by-id.go +++ b/experiments/kube-aws/ebs-loss-by-id/experiment/ebs-loss-by-id.go @@ -40,7 +40,7 @@ func EBSLossByID(clients clients.ClientSets) { if experimentsDetails.EngineName != "" { // Initialize the probe details. Bail out upon error, as we haven't entered exp business logic yet if err = probe.InitializeProbesInChaosResultDetails(&chaosDetails, clients, &resultDetails); err != nil { - log.Errorf("unable to initialize the probes, err: %v", err) + log.Errorf("Unable to initialize the probes: %v", err) return } } @@ -48,9 +48,8 @@ func EBSLossByID(clients clients.ClientSets) { //Updating the chaos result in the beginning of experiment log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { - log.Errorf("unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of ebs-loss experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("Unable to create the chaosresult: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -60,7 +59,9 @@ func EBSLossByID(clients clients.ClientSets) { // generating the event in chaosresult to marked the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosresult", types.AwaitedVerdict) + } // Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails) @@ -76,9 +77,8 @@ func EBSLossByID(clients clients.ClientSets) { //Verify the aws ec2 instance is attached to ebs volume if chaosDetails.DefaultHealthCheck { if err = aws.EBSStateCheckByID(experimentsDetails.EBSVolumeID, experimentsDetails.Region); err != nil { - log.Errorf("volume status check failed pre chaos, err: %v", err) - failStep := "[pre-chaos]: Failed to verify if the ebs volume is attached to ec2 instance, err: " + err.Error() - 
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("Volume status check failed pre chaos: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -91,46 +91,42 @@ func EBSLossByID(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { - log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() + log.Errorf("Probe Failed: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck) + } + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "AUT: Running, Probes: Successful" } // generating the events for the pre-chaos check types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck) + } } - // Including the litmus lib for ebs-loss - switch experimentsDetails.ChaosLib { - case "litmus": - if err = litmusLIB.PrepareEBSLossByID(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + + if err = litmusLIB.PrepareEBSLossByID(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("Chaos injection failed: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase + if chaosDetails.DefaultHealthCheck { //Verify the aws ec2 instance is attached to ebs volume if err = aws.EBSStateCheckByID(experimentsDetails.EBSVolumeID, experimentsDetails.Region); err != nil { - log.Errorf("volume status check failed post chaos, err: %v", err) - failStep := "[post-chaos]: Failed to verify if the ebs volume is attached to an ec2 instance, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("Volume status check failed post chaos: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, 
&eventsDetails) return } } @@ -142,12 +138,13 @@ func EBSLossByID(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { - log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() + log.Errorf("Probes Failed: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck) + } + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "AUT: Running, Probes: Successful" @@ -155,13 +152,15 @@ func EBSLossByID(clients clients.ClientSets) { // generating post chaos event types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck) + } } //Updating the chaosResult in the end of experiment log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { - log.Errorf("unable to Update the Chaos Result, err: %v", err) + log.Errorf("Unable to update the chaosresult: %v", err) return } @@ -174,12 +173,16 @@ func EBSLossByID(clients clients.ClientSets) { eventType = "Warning" } types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosresult", reason) + } if experimentsDetails.EngineName != "" { msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed" types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.Summary) + } } } diff --git a/experiments/kube-aws/ebs-loss-by-tag/experiment/ebs-loss-by-tag.go b/experiments/kube-aws/ebs-loss-by-tag/experiment/ebs-loss-by-tag.go index c400f6cd8..586555212 100644 --- a/experiments/kube-aws/ebs-loss-by-tag/experiment/ebs-loss-by-tag.go +++ b/experiments/kube-aws/ebs-loss-by-tag/experiment/ebs-loss-by-tag.go @@ -39,7 +39,7 @@ func EBSLossByTag(clients clients.ClientSets) { if experimentsDetails.EngineName != "" { // Initialize the probe details. 
Bail out upon error, as we haven't entered exp business logic yet if err := probe.InitializeProbesInChaosResultDetails(&chaosDetails, clients, &resultDetails); err != nil { - log.Errorf("unable to initialize the probes, err: %v", err) + log.Errorf("Unable to initialize the probes: %v", err) return } } @@ -47,9 +47,8 @@ func EBSLossByTag(clients clients.ClientSets) { //Updating the chaos result in the beginning of experiment log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { - log.Errorf("unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of ebs-loss experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("Unable to create the chaosresult: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -59,7 +58,9 @@ func EBSLossByTag(clients clients.ClientSets) { // generating the event in chaosresult to marked the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosresult", types.AwaitedVerdict) + } // Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails) @@ -75,9 +76,8 @@ func EBSLossByTag(clients clients.ClientSets) { //selecting the target volumes (pre chaos) //if no volumes found in attached state then this check will fail if err := aws.SetTargetVolumeIDs(&experimentsDetails); err != nil { - log.Errorf("failed to set the volumes under chaos, err: %v", err) - failStep := "[pre-chaos]: Failed to select the target EBS volumes from tag, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("Failed to set the volumes under chaos: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -89,46 +89,42 @@ func EBSLossByTag(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { - log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() + log.Errorf("Probe Failed: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck) + } + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return 
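// A minimal, self-contained sketch of the event-generation pattern these hunks introduce:
// the error returned by events.GenerateEvents is captured and logged instead of being
// silently dropped. The wrapper name generateEventOrLog is hypothetical; the imports are
// the litmus-go packages the experiment files above already rely on.
package experiment

import (
	clients "github.com/litmuschaos/litmus-go/pkg/clients"
	"github.com/litmuschaos/litmus-go/pkg/events"
	"github.com/litmuschaos/litmus-go/pkg/log"
	"github.com/litmuschaos/litmus-go/pkg/types"
)

// generateEventOrLog emits a chaos event of the given kind and reason, and logs a warning
// (without failing the experiment) when the Kubernetes event cannot be created.
func generateEventOrLog(eventsDetails *types.EventDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, kind string, reason interface{}) {
	if eventErr := events.GenerateEvents(eventsDetails, clients, chaosDetails, kind); eventErr != nil {
		log.Errorf("Failed to create %v event inside %v", reason, kind)
	}
}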
} msg = "AUT: Running, Probes: Successful" } // generating the events for the pre-chaos check types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck) + } } - // Including the litmus lib for ebs-loss - switch experimentsDetails.ChaosLib { - case "litmus": - if err := litmusLIB.PrepareEBSLossByTag(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + + if err := litmusLIB.PrepareEBSLossByTag(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("Chaos injection failed: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase + if chaosDetails.DefaultHealthCheck { //Verify the aws ec2 instance is attached to ebs volume if err := aws.PostChaosVolumeStatusCheck(&experimentsDetails); err != nil { - log.Errorf("failed to verify the ebs volume is attached to an instance, err: %v", err) - failStep := "[post-chaos]: Failed to verify if the ebs volume is attached to an instance, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("Failed to verify that the EBS volume is attached to an instance: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -140,12 +136,13 @@ func EBSLossByTag(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { - log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() + log.Errorf("Probes Failed: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck) + } + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "AUT: Running, Probes: Successful" @@ -153,13 +150,15 @@ func EBSLossByTag(clients clients.ClientSets) { // generating post chaos event 
types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck) + } } //Updating the chaosResult in the end of experiment log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { - log.Errorf("unable to Update the Chaos Result, err: %v", err) + log.Errorf("Unable to update the chaosresult: %v", err) return } @@ -172,12 +171,16 @@ func EBSLossByTag(clients clients.ClientSets) { eventType = "Warning" } types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosresult", reason) + } if experimentsDetails.EngineName != "" { msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed" types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.Summary) + } } } diff --git a/experiments/kube-aws/ec2-terminate-by-id/experiment/ec2-terminate-by-id.go b/experiments/kube-aws/ec2-terminate-by-id/experiment/ec2-terminate-by-id.go index b63442d00..9cf030cf8 100644 --- a/experiments/kube-aws/ec2-terminate-by-id/experiment/ec2-terminate-by-id.go +++ b/experiments/kube-aws/ec2-terminate-by-id/experiment/ec2-terminate-by-id.go @@ -45,7 +45,7 @@ func EC2TerminateByID(clients clients.ClientSets) { if experimentsDetails.EngineName != "" { // Initialize the probe details. 
Bail out upon error, as we haven't entered exp business logic yet if err = probe.InitializeProbesInChaosResultDetails(&chaosDetails, clients, &resultDetails); err != nil { - log.Errorf("Unable to initialize the probes, err: %v", err) + log.Errorf("Unable to initialize the probes: %v", err) return } } @@ -53,9 +53,8 @@ func EC2TerminateByID(clients clients.ClientSets) { //Updating the chaos result in the beginning of experiment log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { - log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of ec2 terminate experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("Unable to create the chaosresult: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -65,7 +64,9 @@ func EC2TerminateByID(clients clients.ClientSets) { // generating the event in chaosresult to marked the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosresult", types.AwaitedVerdict) + } //DISPLAY THE INSTANCE INFORMATION log.InfoWithValues("The instance information is as follows", logrus.Fields{ @@ -86,28 +87,30 @@ func EC2TerminateByID(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { - log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() + log.Errorf("Probe Failed: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck) + } + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "AUT: Running, Probes: Successful" } // generating the events for the pre-chaos check types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck) + } } //Verify the aws ec2 instance is running (pre chaos) if chaosDetails.DefaultHealthCheck && experimentsDetails.ManagedNodegroup != "enable" { log.Info("[Status]: Verify that the aws ec2 instances are in running state (pre-chaos)") if err = aws.InstanceStatusCheckByID(experimentsDetails.Ec2InstanceID, 
experimentsDetails.Region); err != nil { - log.Errorf("failed to get the ec2 instance status, err: %v", err) - failStep := "[pre-chaos]: Failed to verify the AWS ec2 instance status, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("EC2 instance status check failed: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Info("[Status]: EC2 instance is in running state") @@ -118,39 +121,31 @@ func EC2TerminateByID(clients clients.ClientSets) { log.Info("[Status]: Counting number of active nodes in the node group (pre-chaos)") activeNodeCount, autoScalingGroupName, err = aws.PreChaosNodeCountCheck(strings.Split(experimentsDetails.Ec2InstanceID, ","), experimentsDetails.Region) if err != nil { - log.Errorf("Pre chaos node status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the NUT (Node Under Test) is running (pre-chaos), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("Pre chaos node status check failed: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } - // Including the litmus lib for ec2-terminate - switch experimentsDetails.ChaosLib { - case "litmus": - if err = litmusLIB.PrepareEC2TerminateByID(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + + if err = litmusLIB.PrepareEC2TerminateByID(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("Chaos injection failed: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase + //Verify the aws ec2 instance is running (post chaos) if chaosDetails.DefaultHealthCheck && experimentsDetails.ManagedNodegroup != "enable" { log.Info("[Status]: Verify that the aws ec2 instances are in running state (post-chaos)") if err = aws.InstanceStatusCheckByID(experimentsDetails.Ec2InstanceID, experimentsDetails.Region); err != nil { - log.Errorf("failed to get the ec2 instance status, err: %v", err) - failStep := "[post-chaos]: Failed to verify the AWS ec2 instance status, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("EC2 instance status check failed: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Info("[Status]: EC2 instance is in running state (post chaos)") @@ -160,9 +155,8 @@ func EC2TerminateByID(clients clients.ClientSets) { if experimentsDetails.ManagedNodegroup == "enable" { log.Info("[Status]: Counting and verifying number of active nodes in the node group 
(post-chaos)") if err := aws.PostChaosNodeCountCheck(activeNodeCount, autoScalingGroupName, experimentsDetails.Region); err != nil { - log.Errorf("Post chaos active node count check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify the active number of nodes, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("Post chaos active node count check failed: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -174,12 +168,13 @@ func EC2TerminateByID(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { - log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() + log.Errorf("Probes Failed: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck) + } + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "AUT: Running, Probes: Successful" @@ -187,13 +182,15 @@ func EC2TerminateByID(clients clients.ClientSets) { // generating post chaos event types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck) + } } //Updating the chaosResult in the end of experiment log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { - log.Errorf("Unable to Update the Chaos Result, err: %v", err) + log.Errorf("Unable to update the chaosresult: %v", err) return } @@ -206,12 +203,16 @@ func EC2TerminateByID(clients clients.ClientSets) { eventType = "Warning" } types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosresult", reason) + } if experimentsDetails.EngineName != "" { msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed" types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.Summary) + } } } diff --git 
a/experiments/kube-aws/ec2-terminate-by-tag/experiment/ec2-terminate-tag.go b/experiments/kube-aws/ec2-terminate-by-tag/experiment/ec2-terminate-tag.go index dcb52e385..89361fe5d 100644 --- a/experiments/kube-aws/ec2-terminate-by-tag/experiment/ec2-terminate-tag.go +++ b/experiments/kube-aws/ec2-terminate-by-tag/experiment/ec2-terminate-tag.go @@ -44,7 +44,7 @@ func EC2TerminateByTag(clients clients.ClientSets) { if experimentsDetails.EngineName != "" { // Initialize the probe details. Bail out upon error, as we haven't entered exp business logic yet if err = probe.InitializeProbesInChaosResultDetails(&chaosDetails, clients, &resultDetails); err != nil { - log.Errorf("Unable to initialize the probes, err: %v", err) + log.Errorf("Unable to initialize the probes: %v", err) return } } @@ -52,9 +52,8 @@ func EC2TerminateByTag(clients clients.ClientSets) { //Updating the chaos result in the beginning of experiment log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { - log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of ec2 terminate experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("Unable to create the chaosresult: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -64,7 +63,9 @@ func EC2TerminateByTag(clients clients.ClientSets) { // generating the event in chaosresult to marked the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosresult", types.AwaitedVerdict) + } //DISPLAY THE INSTANCE INFORMATION log.InfoWithValues("The instance information is as follows", logrus.Fields{ @@ -86,26 +87,28 @@ func EC2TerminateByTag(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { - log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() + log.Errorf("Probe Failed: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck) + } + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "AUT: Running, Probes: Successful" } // generating the events for the pre-chaos check types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + if eventErr := 
events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck) + } } //selecting the target instance (pre chaos) if err = litmusLIB.SetTargetInstance(&experimentsDetails); err != nil { - log.Errorf("failed to get the target ec2 instance, err: %v", err) - failStep := "[pre-chaos]: Failed to select the target AWS ec2 instance from tag, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("Failed to get the target ec2 instance: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -114,39 +117,31 @@ func EC2TerminateByTag(clients clients.ClientSets) { log.Info("[Status]: Counting number of active nodes in the node group (pre-chaos)") activeNodeCount, autoScalingGroupName, err = aws.PreChaosNodeCountCheck(experimentsDetails.TargetInstanceIDList, experimentsDetails.Region) if err != nil { - log.Errorf("Pre chaos node status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the NUT (Node Under Test) is running, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("Pre chaos node status check failed: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } - // Including the litmus lib for ec2-terminate - switch experimentsDetails.ChaosLib { - case "litmus": - if err = litmusLIB.PrepareEC2TerminateByTag(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + + if err = litmusLIB.PrepareEC2TerminateByTag(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("Chaos injection failed: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase + //Verify the aws ec2 instance is running (post chaos) if chaosDetails.DefaultHealthCheck && experimentsDetails.ManagedNodegroup != "enable" { log.Info("[Status]: Verify that the aws ec2 instances are in running state (post-chaos)") if err = aws.InstanceStatusCheck(experimentsDetails.TargetInstanceIDList, experimentsDetails.Region); err != nil { - log.Errorf("failed to get the ec2 instance status as running post chaos, err: %v", err) - failStep := "[post-chaos]: Failed to verify the AWS ec2 instance status, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("Failed to get the ec2 instance status as running post chaos: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Info("[Status]: EC2 instance is in 
running state (post chaos)") @@ -156,9 +151,8 @@ func EC2TerminateByTag(clients clients.ClientSets) { if experimentsDetails.ManagedNodegroup == "enable" { log.Info("[Status]: Counting and verifying number of active nodes in the node group (post-chaos)") if err = aws.PostChaosNodeCountCheck(activeNodeCount, autoScalingGroupName, experimentsDetails.Region); err != nil { - log.Errorf("Post chaos active node count check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify the active number of nodes, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("Post chaos active node count check failed: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -169,12 +163,13 @@ func EC2TerminateByTag(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { - log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() + log.Errorf("Probes Failed: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck) + } + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "AUT: Running, Probes: Successful" @@ -182,13 +177,15 @@ func EC2TerminateByTag(clients clients.ClientSets) { // generating post chaos event types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck) + } } //Updating the chaosResult in the end of experiment log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { - log.Errorf("Unable to Update the Chaos Result, err: %v", err) + log.Errorf("Unable to update the chaosresult: %v", err) return } @@ -201,12 +198,16 @@ func EC2TerminateByTag(clients clients.ClientSets) { eventType = "Warning" } types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosresult", reason) + } if experimentsDetails.EngineName != "" { msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed" types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + if eventErr := 
events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.Summary) + } } } diff --git a/experiments/spring-boot/spring-boot-faults/experiment/spring-boot-faults.go b/experiments/spring-boot/spring-boot-faults/experiment/spring-boot-faults.go index 4d9948280..07baeb092 100644 --- a/experiments/spring-boot/spring-boot-faults/experiment/spring-boot-faults.go +++ b/experiments/spring-boot/spring-boot-faults/experiment/spring-boot-faults.go @@ -48,8 +48,7 @@ func Experiment(clients clients.ClientSets, expName string) { log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of spring-boot-chaos experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -75,10 +74,9 @@ func Experiment(clients clients.ClientSets, expName string) { log.Infof("[PreCheck]: Getting targeted pods list") if err := litmusLIB.SetTargetPodList(&experimentsDetails, clients, &chaosDetails); err != nil { log.Errorf("Failed to get target pod list, err: %v", err) - failStep := "[pre-chaos]: Failed to get pod list, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "Pods: Not Found", "Warning", &chaosDetails) _ = events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } podNames := make([]string, 0, 1) @@ -91,10 +89,9 @@ func Experiment(clients clients.ClientSets, expName string) { log.Infof("[PreCheck]: Checking for ChaosMonkey endpoint in target pods") if _, err := litmusLIB.CheckChaosMonkey(experimentsDetails.ChaosMonkeyPort, experimentsDetails.ChaosMonkeyPath, experimentsDetails.TargetPodList); err != nil { log.Errorf("Some target pods don't have the chaos monkey endpoint, err: %v", err) - failStep := "[pre-chaos]: Some target pods don't have the chaos monkey endpoint, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "ChaosMonkey: Not Found", "Warning", &chaosDetails) _ = events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -103,10 +100,9 @@ func Experiment(clients clients.ClientSets, expName string) { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) _ = events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, 
failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -119,11 +115,10 @@ func Experiment(clients clients.ClientSets, expName string) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) _ = events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "AUT: Running, Probes: Successful" @@ -133,34 +128,27 @@ func Experiment(clients clients.ClientSets, expName string) { _ = events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") } - // Including the litmus lib - switch experimentsDetails.ChaosLib { - case "litmus": - if err := litmusLIB.PrepareChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match found for specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + + if err := litmusLIB.PrepareChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("Chaos injection failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase + // POST-CHAOS APPLICATION STATUS CHECK if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) - failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error() types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) _ = events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } } @@ -173,11 +161,10 @@ func Experiment(clients clients.ClientSets, expName string) { if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() msg := "AUT: Running, Probes: Unsuccessful" 
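// Condensed sketch of the control flow these hunks converge on (variable and function
// names mirror the spring-boot experiment above; this is an illustration of the shape,
// assuming the same in-scope variables, not an extra hunk of its own): the ChaosLib
// switch is dropped since only the litmus lib remains, the current phase is tracked on
// chaosDetails, and the raw error is handed to result.RecordAfterFailure in place of a
// hand-built failStep string.
chaosDetails.Phase = types.ChaosInjectPhase

if err := litmusLIB.PrepareChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
	log.Errorf("Chaos injection failed, err: %v", err)
	// the error itself now supplies the failstep detail stored in the chaos result
	result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
	return
}
log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
resultDetails.Verdict = v1alpha1.ResultVerdictPassed

chaosDetails.Phase = types.PostChaosPhase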
types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) _ = events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "AUT: Running, Probes: Successful" diff --git a/experiments/vmware/vm-poweroff/experiment/vm-poweroff.go b/experiments/vmware/vm-poweroff/experiment/vm-poweroff.go index f967a4af0..1c94c2670 100644 --- a/experiments/vmware/vm-poweroff/experiment/vm-poweroff.go +++ b/experiments/vmware/vm-poweroff/experiment/vm-poweroff.go @@ -40,7 +40,7 @@ func VMPoweroff(clients clients.ClientSets) { if experimentsDetails.EngineName != "" { // Initialize the probe details. Bail out upon error, as we haven't entered exp business logic yet if err := probe.InitializeProbesInChaosResultDetails(&chaosDetails, clients, &resultDetails); err != nil { - log.Errorf("Unable to initialize the probes, err: %v", err) + log.Errorf("Unable to initialize the probes: %v", err) return } } @@ -48,9 +48,8 @@ func VMPoweroff(clients clients.ClientSets) { //Updating the chaos result in the beginning of experiment log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { - log.Errorf("Unable to Create the Chaos Result, err: %v", err) - failStep := "[pre-chaos]: Failed to update the chaos result of pod-delete experiment (SOT), err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("Unable to create the chaosresult: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } @@ -60,7 +59,9 @@ func VMPoweroff(clients clients.ClientSets) { // generating the event in chaosresult to marked the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosresult", types.AwaitedVerdict) + } //DISPLAY THE VM INFORMATION log.InfoWithValues("[Info]: The Instance information is as follows", logrus.Fields{ @@ -75,18 +76,16 @@ func VMPoweroff(clients clients.ClientSets) { // GET SESSION ID TO LOGIN TO VCENTER cookie, err := vmware.GetVcenterSessionID(experimentsDetails.VcenterServer, experimentsDetails.VcenterUser, experimentsDetails.VcenterPass) if err != nil { - failStep := "[pre-chaos]: Failed to obtain the Vcenter session ID, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - log.Errorf("Vcenter Login failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) + log.Errorf("Vcenter Login failed: %v", err) return } if chaosDetails.DefaultHealthCheck { // PRE-CHAOS VM STATUS CHECK if err := vmware.VMStatusCheck(experimentsDetails.VcenterServer, experimentsDetails.VMIds, cookie); err != nil { - log.Errorf("Failed to get the VM status, err: %v", err) - failStep := "[pre-chaos]: Failed to verify the VM status, err: " + 
err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("VM status check failed: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Info("[Verification]: VMs are in running state (pre-chaos)") @@ -100,47 +99,43 @@ func VMPoweroff(clients clients.ClientSets) { if len(resultDetails.ProbeDetails) != 0 { if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { - log.Errorf("Probe Failed, err: %v", err) - failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() + log.Errorf("Probe Failed: %v", err) msg := "IUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck) + } + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "IUT: Running, Probes: Successful" } // generating the events for the pre-chaos check types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck) + } } - // Including the litmus lib - switch experimentsDetails.ChaosLib { - case "litmus": - if err = litmusLIB.InjectVMPowerOffChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails, cookie); err != nil { - log.Errorf("Chaos injection failed, err: %v", err) - failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) - return - } - default: - log.Error("[Invalid]: Please Provide the correct LIB") - failStep := "[chaos]: no match was found for the specified lib" - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + chaosDetails.Phase = types.ChaosInjectPhase + + if err = litmusLIB.InjectVMPowerOffChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails, cookie); err != nil { + log.Errorf("Chaos injection failed: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase + if chaosDetails.DefaultHealthCheck { //POST-CHAOS VM STATUS CHECK log.Info("[Status]: Verify that the IUT (Instance Under Test) is running (post-chaos)") if err := vmware.VMStatusCheck(experimentsDetails.VcenterServer, experimentsDetails.VMIds, cookie); err != nil { - log.Errorf("Failed to get the VM status, err: %v", err) - failStep := "[post-chaos]: Failed to get the VM status, err: " + err.Error() - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + log.Errorf("VM status check failed: %v", err) + 
result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } log.Info("[Verification]: VMs are in running state (post-chaos)") @@ -153,12 +148,13 @@ func VMPoweroff(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { - log.Errorf("Probes Failed, err: %v", err) - failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() + log.Errorf("Probes Failed: %v", err) msg := "IUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") - result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck) + } + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return } msg = "IUT: Running, Probes: Successful" @@ -166,13 +162,15 @@ func VMPoweroff(clients clients.ClientSets) { // generating post chaos event types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck) + } } //Updating the chaosResult in the end of experiment log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) if err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { - log.Errorf("Unable to Update the Chaos Result, err: %v", err) + log.Errorf("Unable to update the chaosresult: %v", err) return } @@ -185,11 +183,14 @@ func VMPoweroff(clients clients.ClientSets) { eventType = "Warning" } types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") - + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosresult", reason) + } if experimentsDetails.EngineName != "" { msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed" types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails) - events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil { + log.Errorf("Failed to create %v event inside chaosengine", types.Summary) + } } } diff --git a/go.mod b/go.mod index 6c2e490f1..99d64ff30 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/litmuschaos/litmus-go -go 1.17 +go 1.18 require ( github.com/Azure/azure-sdk-for-go v56.1.0+incompatible @@ -9,7 +9,8 @@ require ( github.com/aws/aws-sdk-go v1.38.59 github.com/containerd/cgroups v1.0.1 github.com/kyokomi/emoji v2.2.4+incompatible - github.com/litmuschaos/chaos-operator v0.0.0-20221121155030-e5b440b5cd82 + github.com/litmuschaos/chaos-operator 
v0.0.0-20221124094152-26a41c021642 + github.com/palantir/stacktrace v0.0.0-20161112013806-78658fd2d177 github.com/pkg/errors v0.9.1 github.com/sirupsen/logrus v1.7.0 github.com/spf13/cobra v1.1.1 diff --git a/go.sum b/go.sum index 7407c3691..698689b1f 100644 --- a/go.sum +++ b/go.sum @@ -1,14 +1,11 @@ -bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= bitbucket.org/bertimus9/systemstat v0.0.0-20180207000608-0eeff89b0690/go.mod h1:Ulb78X89vxKYgdL24HMTiXYHlyHEvruOj1ZPlqeNEZM= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.49.0/go.mod h1:hGvAdzcWNbyuxS3nWhD7H2cIJxjRRTRLQVB0bdputVY= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= @@ -39,36 +36,24 @@ cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+ cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.3.0/go.mod h1:9IAwXhoyBJ7z9LcAwkj0/7NnPzYaPeZxxVp3zm+5IqA= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -contrib.go.opencensus.io/exporter/ocagent v0.6.0/go.mod h1:zmKjrJcdo0aYcVS7bmEeSEBLPA9YJp5bjrofdU3pIXs= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= -github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= -github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v23.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v35.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v36.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v43.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v56.1.0+incompatible h1:Ofcecdw3F1ZqnpDEZcLzH9Hq0P4Y5Si8+EioXJSamJs= github.com/Azure/azure-sdk-for-go v56.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= 
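// The go.mod hunk above pins Go 1.18, bumps chaos-operator, and pulls in
// github.com/palantir/stacktrace. A minimal usage sketch, assuming errors raised deep in
// a chaoslib are wrapped with stacktrace.Propagate so the message recorded by
// result.RecordAfterFailure carries the failing step and its call site; the function
// names below (stopInstance, injectChaos) are illustrative only.
package main

import (
	"fmt"

	"github.com/palantir/stacktrace"
)

// stopInstance stands in for a cloud API call that can fail.
func stopInstance(id string) error {
	return fmt.Errorf("instance %s not found", id)
}

// injectChaos annotates the underlying failure with the step that failed while keeping
// the root cause available via stacktrace.RootCause.
func injectChaos(id string) error {
	if err := stopInstance(id); err != nil {
		return stacktrace.Propagate(err, "failed to stop the target instance")
	}
	return nil
}

func main() {
	if err := injectChaos("i-0123456789abcdef0"); err != nil {
		fmt.Println(err)                       // annotated error with call sites
		fmt.Println(stacktrace.RootCause(err)) // original cause
	}
}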
-github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest v11.2.8+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.9.3-0.20191028180845-3492b2aff503/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest v0.11.17/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest v0.11.18 h1:90Y4srNYrwOtAgVo3ndrQkTYn6kf1Eg/AjTFJ8Is2aM= github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.1-0.20191028180845-3492b2aff503/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/adal v0.9.11/go.mod h1:nBKAnTomx8gDtl+3ZCJv2v0KACFHWTB2drffI1B68Pk= github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q= @@ -78,12 +63,10 @@ github.com/Azure/go-autorest/autorest/azure/auth v0.5.7/go.mod h1:AkzUsqkrdmNhfP github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 h1:dMOmEJfkLKW/7JsokJqkyoYSgmR08hi9KrhjZb+JALY= github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc= @@ -101,28 +84,15 @@ github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUM github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod 
h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= -github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20190822182118-27a4ced34534/go.mod h1:iroGtC8B3tQiqtds1l+mgk/BBOrxbqjH+eUfFQYRc14= github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20200415212048-7901bc822317/go.mod h1:DF8FZRxMHMGv/vP2lQP6h+dYzzjpuRn24VeRiYn3qjQ= github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA= github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= -github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/sprig/v3 v3.1.0/go.mod h1:ONGMf7UfYGAbMXCZmQLy8x3lCDIPrEZE/rU8pmrbihA= -github.com/Masterminds/squirrel v1.2.0/go.mod h1:yaPeOnPG5ZRwL9oKdTsO/prlkPbXWZlRVMQ/gGlzIuA= -github.com/Masterminds/vcs v1.13.1/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= -github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= -github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= github.com/Microsoft/hcsshim v0.0.0-20190417211021-672e52e9209d/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= -github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= -github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= -github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OneOfOne/xxhash v1.2.6/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= github.com/OpenPeeDeeP/depguard v1.0.0/go.mod h1:7/4sitnI9YlQgTLLk734QlzXT8DuHVnAyztLplQjk+o= github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= @@ -131,9 +101,6 @@ github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Rican7/retry v0.1.0/go.mod h1:FgOROf8P5bebcC1DS0PdOQiqGUridaZvikzUmkFW6gg= -github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/agnivade/levenshtein v1.0.1/go.mod 
h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= @@ -142,68 +109,41 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/aliyun/aliyun-oss-go-sdk v2.0.4+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/auth0/go-jwt-middleware v0.0.0-20170425171159-5493cabe49f7/go.mod h1:LWMyo4iOLWXHGdBki7NIht1kHru/0wM179h+d3g8ATM= -github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= -github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.28.2/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k= github.com/aws/aws-sdk-go v1.38.59 h1:rGEMmHdgXSjA2gkdo8Hdwai9mND5X0i+hZetYfABo7g= github.com/aws/aws-sdk-go v1.38.59/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/bazelbuild/bazel-gazelle v0.18.2/go.mod h1:D0ehMSbS+vesFsLGiD6JXu3mVEzOlfUl8wNnq+x/9p0= github.com/bazelbuild/bazel-gazelle v0.19.1-0.20191105222053-70208cbdc798/go.mod h1:rPwzNHUqEzngx1iVBfO/2X2npKaT3tqPqqHW6rVsn/A= github.com/bazelbuild/buildtools v0.0.0-20190731111112-f720930ceb60/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU= github.com/bazelbuild/buildtools v0.0.0-20190917191645-69366ca98f89/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU= 
github.com/bazelbuild/rules_go v0.0.0-20190719190356-6dae44dc5cab/go.mod h1:MC23Dc/wkXEyk3Wpq6lCqz0ZAYOZDw2DR5y3N1q2i7M= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU= -github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= -github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= -github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA= -github.com/brancz/gojsontoyaml v0.0.0-20191212081931-bf2969bbd742/go.mod h1:IyUJYN1gvWjtLF5ZuygmxbnsAyP3aJS6cHzIuZY50B0= -github.com/brancz/kube-rbac-proxy v0.5.0/go.mod h1:cL2VjiIFGS90Cjh5ZZ8+It6tMcBt8rwvuw2J6Mamnl0= -github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= -github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= -github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= -github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/caddyserver/caddy v1.0.3/go.mod h1:G+ouvOY32gENkJC+jhgl62TyhvqEsFaDiZ4uw0RzP1E= -github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8= -github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/prettybench v0.0.0-20150116022406-03b8cfe5406c/go.mod h1:Xe6ZsFhtM8HrDku0pxJ3/Lr51rwykrzgFwpmTzleatY= -github.com/cespare/xxhash v0.0.0-20181017004759-096ff4a8a059/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= github.com/cespare/xxhash/v2 
v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= github.com/checkpoint-restore/go-criu v0.0.0-20181120144056-17b0214f6c48/go.mod h1:TrMrLQfeENAPYPRsJuq3jsqdlRh3lvi6trTZJG8+tho= github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= @@ -213,36 +153,18 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/cilium/ebpf v0.0.0-20191025125908-95b36a581eed/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= github.com/cilium/ebpf v0.4.0 h1:QlHdikaxALkqWasW8hAC1mfR0jdmvbfaBdBPFmRSglA= github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= -github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0= github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= -github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= github.com/containerd/cgroups v1.0.1 h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5vHQ= github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= github.com/containerd/console v0.0.0-20170925154832-84eeaae905fa/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/containerd v1.0.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.2.7/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= 
-github.com/containerd/continuity v0.0.0-20200413184840-d3ef23f19fbb/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= -github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= -github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= -github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= -github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= github.com/containerd/typeurl v0.0.0-20190228175220-2a93cfde8c20/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/coredns/corefile-migration v1.0.6/go.mod h1:OFwBp/Wc9dJt5cAZzHWMNhK1r5L0p0jDwIBc6j8NC8E= @@ -255,14 +177,12 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.1.0 h1:kq/SbG2BCKLkDKkjQf5OWwKWUKj1lgs3lFI4PxnR5lg= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/prometheus-operator v0.38.1-0.20200424145508-7e176fda06cc/go.mod h1:erio69w1R/aC14D5nfvAXSlE8FT8jt2Hnavc50Dp33A= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= @@ -270,59 +190,24 @@ github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7Do github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= -github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= -github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg= -github.com/cznic/golex v0.0.0-20170803123110-4ab7c5e190e4/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc= -github.com/cznic/internal v0.0.0-20180608152220-f44710a21d00/go.mod h1:olo7eAdKwJdXxb55TKGLiJ6xt1H0/tiiRCWKVLmtjY4= -github.com/cznic/lldb v1.1.0/go.mod h1:FIZVUmYUVhPwRiPzL8nD/mpFcJ/G7SSXjjXYG4uRI3A= 
-github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= -github.com/cznic/ql v1.2.0/go.mod h1:FbpzhyZrqr0PVlK6ury+PoW3T0ODUV22OeWIxcaOrSE= -github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ= -github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc= -github.com/cznic/zappy v0.0.0-20160723133515-2533cb5b45cc/go.mod h1:Y1SNZ4dRUOKXshKUbwUapqNncRrho4mkjQebgEHZLj8= -github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= -github.com/deislabs/oras v0.8.1/go.mod h1:Mx0rMSbBNaNfY9hjpccEnxkOqJL6KGjtxNHPLC4G4As= -github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= -github.com/denisenkom/go-mssqldb v0.0.0-20191001013358-cfbb681360f0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= -github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= -github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= -github.com/docker/cli v0.0.0-20200130152716-5d0cf8839492/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/distribution v0.0.0-20191216044856-a8371794149d/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= -github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= -github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0 
h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/elastic/go-sysinfo v1.0.1/go.mod h1:O/D5m1VpYLwGjCYzEt63g3Z1uO3jXfwyzzjiW90t8cY= -github.com/elastic/go-sysinfo v1.1.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= -github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= -github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -336,30 +221,24 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= -github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= -github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/structtag v1.1.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod 
h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk= github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= -github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= @@ -367,13 +246,11 @@ github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0 github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-acme/lego v2.5.0+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG94USbYxYrZfTkIn0M= github.com/go-bindata/go-bindata v3.1.1+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= -github.com/go-bindata/go-bindata/v3 v3.1.3/go.mod h1:1/zrpXsLD8YDIbhZRqXzm1Ghc7NhEvIN9+Z6R5/xH4I= github.com/go-critic/go-critic v0.3.5-0.20190526074819-1df300866540/go.mod h1:+sE8vrLDS2M0pZkBk0wy6+nLdKexVDrl/jBqQOTDThA= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= @@ -385,70 +262,55 @@ github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= -github.com/go-logr/zapr v0.1.1/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= github.com/go-openapi/analysis 
v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.17.2/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.17.2/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.17.2/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.17.2/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.17.2/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= -github.com/go-openapi/runtime v0.18.0/go.mod h1:uI6pHuxWYTy94zZxgcwJkUWa9wbIlhteGfloI10GD4U= github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.17.2/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.19.2/go.mod 
h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.17.2/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= -github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.17.2/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/validate v0.17.2/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-ozzo/ozzo-validation v3.5.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= @@ -464,39 +326,20 @@ github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8/go.mod h1:WoM github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= -github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= -github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/envy v1.7.1/go.mod h1:FurDp9+EDPE4aIUS3ZLyD+7/9fpx7YRt/ukY6jIHf0w= -github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= -github.com/gobuffalo/flect v0.2.0/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= -github.com/gobuffalo/flect v0.2.1/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc= github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM= -github.com/gobuffalo/logger 
v1.0.1/go.mod h1:2zbswyIUa45I+c+FLXuWl9zSWEiVuthsk8ze5s8JvPs= -github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b36chA3Q= -github.com/gobuffalo/packr/v2 v2.7.1/go.mod h1:qYEvAazPaVxy7Y7KR0W8qYEE+RymX74kETFqjFoFlOc= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0= github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8= -github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3 h1:ZqHaoEF7TBzh4jzPmqVhE/5A1z9of6orkAe5uHoAeME= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-migrate/migrate/v4 v4.6.2/go.mod h1:JYi6reN3+Z734VZ0akNuyOJNcrg45ZL7LDBMW3WGJL0= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= @@ -529,9 +372,6 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 
-github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= @@ -569,7 +409,6 @@ github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -579,7 +418,6 @@ github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIG github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -593,143 +431,89 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go v2.0.2+incompatible h1:silFMLAnr330+NRuag/VjIGF7TLp/LBrV2CJKFLWEww= -github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/googleapis/gnostic v0.4.1/go.mod 
h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gophercloud/gophercloud v0.2.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gophercloud/gophercloud v0.3.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gophercloud/gophercloud v0.6.0/go.mod h1:GICNByuaEBibcjmjvI7QvYJSZEbGkcYwAR7EZK2WMqM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= -github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.4/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= -github.com/grpc-ecosystem/grpc-health-probe v0.2.1-0.20181220223928-2bf0a5b182db/go.mod h1:uBKkC2RbarFsvS5jMJHpVhTLvGlGQj9JJwkaePE3FWI= -github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/api v1.3.0/go.mod 
h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v0.0.0-20180404174102-ef8a98b0bbce/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/memberlist v0.1.5/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/serf v0.8.5/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k= github.com/heketi/heketi v9.0.1-0.20190917153846-c2e2a4ab7ab9+incompatible/go.mod 
h1:bB9ly3RchcQqsQ9CpyaQwvva7RS5ytVoSoholZQON6o= github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7UkZt1i4FQeQy0R2T8GLUwQhOP5M1gBhy4= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/iancoleman/strcase v0.0.0-20190422225806-e506e3ef7365/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/influxdb v1.7.7/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= -github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= -github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= -github.com/jessevdk/go-flags v0.0.0-20180331124232-1c38ed7ad0cc/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8= -github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= -github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= -github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= 
-github.com/jpillora/go-ogle-analytics v0.0.0-20161213085824-14b04e0594ef/go.mod h1:PlwhC7q1VSK73InDzdDatVetQrTsQHIbOvcJAZzitY0= -github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jsonnet-bundler/jsonnet-bundler v0.3.1/go.mod h1:/by7P/OoohkI3q4CgSFqcoFsVY+IaNbzOVDknEsKDeU= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= github.com/karrick/godirwalk v1.7.5/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v0.0.0-20161130080628-0de1eaf82fa3/go.mod h1:jxZFDH7ILpTPQTk+E2s+z4CUas9lVNjIuKR4c5/zKgM= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= @@ -738,7 +522,6 @@ github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0 github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -751,27 +534,15 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod 
h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kshvakov/clickhouse v1.3.5/go.mod h1:DMzX7FxRymoNkVgizH0DWAL8Cur7wHLgx3MUnGwJqpE= -github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kyokomi/emoji v2.2.4+incompatible h1:np0woGKwx9LiHAQmwZx79Oc0rHpNw3o+3evou4BEPv4= github.com/kyokomi/emoji v2.2.4+incompatible/go.mod h1:mZ6aGCD7yk8j6QY6KICwnZ2pxoszVseX1DNoGtU2tBA= -github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= -github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= -github.com/leanovate/gopter v0.2.4/go.mod h1:gNcbPWNEWRe4lm+bycKqxUYoH5uoVje5SkOJ3uoLer8= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= -github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= -github.com/lightstep/lightstep-tracer-go v0.18.0/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= -github.com/litmuschaos/chaos-operator v0.0.0-20221121155030-e5b440b5cd82 h1:+9bva8qc3SGBMuKzsggUOvNgqQKE7ttFgnnkO19y9Ug= -github.com/litmuschaos/chaos-operator v0.0.0-20221121155030-e5b440b5cd82/go.mod h1:CJGiHqC06PQkIBySk/JroB7B2zFebDbkhQ1A6ZbYmHA= -github.com/litmuschaos/elves v0.0.0-20201107015738-552d74669e3c/go.mod h1:DsbHGNUq/78NZozWVVI9Q6eBei4I+JjlkkD5aibJ3MQ= +github.com/litmuschaos/chaos-operator v0.0.0-20221124094152-26a41c021642 h1:2MF5hySuFgICBDuqB4sHCdAjMVAxitBQYnRckqWwo0s= +github.com/litmuschaos/chaos-operator v0.0.0-20221124094152-26a41c021642/go.mod h1:CJGiHqC06PQkIBySk/JroB7B2zFebDbkhQ1A6ZbYmHA= github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= -github.com/lovoo/gcloud-opentracing v0.3.0/go.mod h1:ZFqk2y38kMDDikZPAK7ynTTGuyt17nSPdS3K5e+ZTBY= github.com/lpabon/godbc v0.1.1/go.mod h1:Jo9QV0cf3U6jZABgiJ2skINAXb9j8m51r07g4KI92ZA= github.com/lucas-clemente/aes12 v0.0.0-20171027163421-cd47fb39b79f/go.mod h1:JpH9J1c9oX6otFSgdUHwUBUizmKlrMjxWnIAjff4m04= github.com/lucas-clemente/quic-clients v0.1.0/go.mod h1:y5xVIEoObKqULIKivu+gD/LU90pL73bTdtQjPBvtCBk= @@ -786,49 +557,26 @@ github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/markbates/inflect v1.0.4/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs= github.com/markbates/pkger v0.17.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= -github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= 
github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= -github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= -github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.6/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= -github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-sqlite3 v1.12.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= github.com/mesos/mesos-go v0.0.9/go.mod h1:kPYCMQ9gsOXVAle1OsoY4I1+9kPu8GHkf88aV59fDr4= github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.3/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/mikefarah/yaml/v2 v2.4.0/go.mod h1:ahVqZF4n1W4NqwvVnZzC4es67xsW9uR/RRf2RRxieJU= -github.com/mikefarah/yq/v2 v2.4.1/go.mod h1:i8SYf1XdgUvY2OFwSqGAtWOOgimD2McJ6iutoxRm4k0= github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989/go.mod h1:2eu9pRWp8mo84xCg6KswZ+USQHjwgRhNp06sozOdsTY= -github.com/minio/minio-go/v6 v6.0.49/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg= -github.com/minio/sha256-simd v0.1.1/go.mod 
h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -836,13 +584,10 @@ github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936/go.mod h1:r1VsdOzO github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/moby v0.7.3-0.20190826074503-38ab9da00309/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= @@ -850,18 +595,14 @@ github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXy github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= -github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mozilla/tls-observatory v0.0.0-20180409132520-8791a200eb40/go.mod 
h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= -github.com/mozillazg/go-cos v0.13.0/go.mod h1:Zp6DvvXn0RUOXGJ2chmWt2bLEqRAnJnS3DnAZsJsoaE= -github.com/mozillazg/go-httpheader v0.2.1/go.mod h1:jJ8xECTlalr6ValeXYdOF8fFUISeBAdw6E61aqQma60= github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= @@ -869,31 +610,21 @@ github.com/mvdan/xurls v1.1.0/go.mod h1:tQlNn3BED8bE/15hnSL2HLkDeLWpNPAwtw7wkEq4 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= github.com/nbutton23/zxcvbn-go v0.0.0-20160627004424-a22cb81b2ecd/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= github.com/nbutton23/zxcvbn-go v0.0.0-20171102151520-eafdab6b0663/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= -github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/oklog/ulid v0.0.0-20170117200651-66bb6560562f/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= 
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= @@ -903,187 +634,98 @@ github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= -github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.15.0 h1:WjP/FQ/sk43MRmnEcT+MlDw2TFvkrXlprrPST/IudjU= github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= -github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc10/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/selinux v1.3.1-0.20190929122143-5215b1806f52/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs= -github.com/openshift/origin v0.0.0-20160503220234-8f127d736703/go.mod h1:0Rox5r9C8aQn6j1oAOQ0c1uC86mYbUFObzjBRvUKHII= -github.com/openshift/prom-label-proxy v0.1.1-0.20191016113035-b8153a7f39f1/go.mod h1:p5MuxzsYP1JPsNGwtjtcgRHHlGziCJJfztff91nNixw= -github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= -github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/operator-framework/api 
v0.3.7-0.20200602203552-431198de9fc2/go.mod h1:Xbje9x0SHmh0nihE21kpesB38vk3cyxnE6JdDS8Jo1Q= -github.com/operator-framework/api v0.3.8/go.mod h1:Xbje9x0SHmh0nihE21kpesB38vk3cyxnE6JdDS8Jo1Q= -github.com/operator-framework/operator-registry v1.12.6-0.20200611222234-275301b779f8/go.mod h1:loVINznYhgBIkmv83kU4yee88RS0BBk+hqOw9r4bhJk= -github.com/operator-framework/operator-sdk v0.19.0/go.mod h1:8MR6CguLizat2RGjdSMifGwW6mEMwKqAtZnSUHJ6SxU= -github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= -github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= -github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= -github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= -github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= +github.com/palantir/stacktrace v0.0.0-20161112013806-78658fd2d177 h1:nRlQD0u1871kaznCnn1EvYiMbum36v7hw1DLPEjds4o= +github.com/palantir/stacktrace v0.0.0-20161112013806-78658fd2d177/go.mod h1:ao5zGxj8Z4x60IOVYZUbDSmt3R8Ddo080vEgPosHpak= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.1.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/pquerna/ffjson v0.0.0-20180717144149-af8b230fcd20/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= -github.com/prometheus/alertmanager v0.18.0/go.mod h1:WcxHBl40VSPuOaqWae6l6HpnEOVRIycEJ7i9iYkadEE= -github.com/prometheus/alertmanager v0.20.0/go.mod h1:9g2i48FAyZW6BtbsnvHtMHQXl2aVtrORKwKVCQ+nbrg= -github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= 
-github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.2.0/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= -github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= -github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= -github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= 
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.6/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/prometheus v0.0.0-20180315085919-58e2a31db8de/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= -github.com/prometheus/prometheus v1.8.2-0.20200110114423-1e64d757f711/go.mod h1:7U90zPoLkWjEIQcy/rweQla82OCTUzxVHE51G3OhJbI= -github.com/prometheus/prometheus v2.3.2+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= github.com/quobyte/api v0.1.2/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= -github.com/robfig/cron v0.0.0-20170526150127-736158dc09e1/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/robfig/cron v1.1.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.4.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.5.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/rubenv/sql-migrate v0.0.0-20200212082348-64f95ea68aa3/go.mod h1:rtQlpHw+eR6UrqaS3kX1VYeaCxzCVdimDS7g5Ln4pPc= github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod 
h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/samuel/go-zookeeper v0.0.0-20190810000440-0ceca61e4d75/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4= -github.com/satori/go.uuid v0.0.0-20160603004225-b111a074d5ef/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= -github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= -github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= -github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/shurcooL/vfsgen v0.0.0-20180825020608-02ddb050ef6b/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= -github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= -github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/assertions v1.0.1/go.mod 
h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= -github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.0/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.2/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= @@ -1091,7 +733,6 @@ github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec/go.mod h1: github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -1105,7 +746,6 @@ github.com/storageos/go-api v0.0.0-20180912212459-343b3eff91fc/go.mod h1:ZrLn+e0 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -1114,24 +754,17 @@ github.com/stretchr/testify v1.6.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= -github.com/thanos-io/thanos v0.11.0/go.mod h1:N/Yes7J68KqvmY+xM6J5CJqEvWIvKSR5sqGtmuD6wDc= github.com/thecodeteam/goscaleio v0.1.0/go.mod h1:68sdkZAsK8bvEwBlbQnlLS+xU+hvLYM/iQ8KXej1AwM= -github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/timakin/bodyclose v0.0.0-20190721030226-87058b9bfcec/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/uber/jaeger-client-go v2.20.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ultraware/funlen v0.0.1/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= -github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= @@ -1143,15 +776,7 @@ github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netns v0.0.0-20171111001504-be1fbeda1936/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= -github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs= -github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= -github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod 
h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= -github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1159,28 +784,14 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= -github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= -github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= -github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= -gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE= -go.elastic.co/apm v1.5.0/go.mod h1:OdB9sPtM6Vt7oz3VXt7+KR96i9li74qrxBGHTQygFvk= -go.elastic.co/apm/module/apmhttp v1.5.0/go.mod h1:1FbmNuyD3ddauwzgVwFB0fqY6KbZt3JkV187tGCYYhY= -go.elastic.co/apm/module/apmot v1.5.0/go.mod h1:d2KYwhJParTpyw2WnTNy8geNlHKKFX+4oK3YLlsesWE= -go.elastic.co/fastjson v1.0.0/go.mod h1:PmeUOMMtLHQr9ZS9J9owrAVg0FkaZDRZJEFTTGHtchs= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -1192,21 +803,14 @@ go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.starlark.net 
v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/automaxprocs v1.2.0/go.mod h1:YfO3fm683kQpzETxlTGZhGIVmXAhaw3gxeBADbpZtnU= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/build v0.0.0-20190927031335-2835ba2e683f/go.mod h1:fYw7AShPAhGMdXqA9gRadk/CcMsvLlClpE5oBwnS3dM= -golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180426230345-b49d69b5da94/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1217,25 +821,16 @@ golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190424203555-c05e17bb3b2d/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b h1:Qwe1rC8PSniVfAFPFJeyUkB+zcysC3RgJBAGk7eqBEU= golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1279,7 +874,6 @@ golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hM golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20170915142106-8351a756f30f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1289,7 +883,6 @@ golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181102091132-c10e9556a7bc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1300,24 +893,17 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190328230028-74de082e2cca/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net 
v0.0.0-20190502183928-7f726cade0ab/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191028085509-fe3aa8a45271/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1344,13 +930,9 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63 h1:iocB37TsdFuN6IBRZ+ry36wrkoV51/tl5vOWqkcPGvY= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1385,57 +967,37 @@ 
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190122071731-054c452bb702/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190310054646-10058d7d4faa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190425145619-16072639606e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190426135247-a129542de9ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1471,10 +1033,7 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20220704084225-05e143d24a9e h1:CsOuNlbOuf0mzxJIefr6Q4uAUetRUwZE4qt7VfzP+xo= golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -1485,7 +1044,6 @@ golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fq golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915090833-1cbadb444a80/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180805044716-cb6730876b98/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -1504,14 +1062,11 @@ golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/tools v0.0.0-20170915040203-e531a2a1c15f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190118193359-16909d206f00/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190121143147-24cd39ecf745/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190122202912-9c309ee22fab/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1524,7 +1079,6 @@ golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190425222832-ad9eeb80039a/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -1534,21 +1088,12 @@ golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= -golang.org/x/tools v0.0.0-20190813034749-528a2984e271/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190909030654-5b82db07426d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190918214516-5a1a30219888/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191004055002-72853e10c5a3/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191030203535-5e247c9ad0a0/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191111182352-50fa39b762bc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1568,7 +1113,6 @@ golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200403190813-44a64ad78b9b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -1588,25 +1132,18 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod 
h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= -gomodules.xyz/jsonpatch/v3 v3.0.1/go.mod h1:CBhndykehEwTOlEfnsfJwvkFQbSN8YZFr9M+cIHAJto= -gomodules.xyz/orderedmap v0.1.0/go.mod h1:g9/TPUCm1t2gwD3j3zfV8uylyYhVdCNSi+xCEIu7yTU= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= gonum.org/v1/gonum v0.6.2/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= -google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.6.1-0.20190607001116-5213b8090861/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= @@ -1634,7 +1171,6 @@ google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59t google.golang.org/api v0.48.0 h1:RDAPWfNFY06dffEXfn7hZF5Fr1ZbnChzfQZAPyBd1+I= google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -1642,25 +1178,19 @@ google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto 
v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -1693,17 +1223,12 @@ google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQ google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08 h1:pc16UedxnxXXtGxHCSUhafAoVHQZ0yXl8ZelMH4EETc= google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.22.1/go.mod 
h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= @@ -1739,37 +1264,28 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= -gopkg.in/gorp.v1 v1.7.2/go.mod h1:Wo3h+DBQZIxATwftsglhdD/62zRFPhGhTiu5jUJmCaw= -gopkg.in/imdario/mergo.v0 v0.3.7/go.mod h1:9qPP6AGrlC1G2PTNXko614FwGZvorN7MiBU0Eppok+U= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/mcuadros/go-syslog.v2 v2.2.1/go.mod h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm0aKH+5zpt2U= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.1.1/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1777,7 +1293,6 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -1789,8 +1304,6 @@ gotest.tools/gotestsum v0.3.5/go.mod h1:Mnf3e5FUzXbkCfynWBGOwLssY7gTQgCHObK9tMpA gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= -helm.sh/helm/v3 v3.2.4/go.mod h1:ZaXz/vzktgwjyGGFbUWtIQkscfE7WYoRGP2szqAFHR0= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1799,14 +1312,12 @@ honnef.co/go/tools v0.0.1-2019.2.2/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= k8s.io/api v0.21.2 h1:vz7DqmRsXTCSa6pNxXwQ1IYeAZgdIsua+DZU+o+SX3Y= k8s.io/api v0.21.2/go.mod h1:Lv6UGJZ1rlMI1qusN8ruAp9PUBFyBwpEHAdG24vIsiU= k8s.io/apiextensions-apiserver v0.21.2/go.mod h1:+Axoz5/l3AYpGLlhJDfcVQzCerVYq3K3CvDMvw6X1RA= k8s.io/apimachinery v0.21.2 h1:vezUc/BHqWlQDnZ+XkrpXSmnANSLbpnlpwo0Lhk0gpc= k8s.io/apimachinery v0.21.2/go.mod h1:CdTY8fU/BlvAbJ2z/8kBwimGki5Zp8/fbVuLY8gJumM= k8s.io/apiserver v0.21.2/go.mod h1:lN4yBoGyiNT7SC1dmNk0ue6a5Wi6O3SWOIw91TsucQw= -k8s.io/autoscaler v0.0.0-20190607113959-1b4f1855cb8e/go.mod h1:QEXezc9uKPT91dwqhSJq3GNI3B1HxFRQHiku9kmrsSA= k8s.io/cli-runtime v0.21.2/go.mod h1:8u/jFcM0QpoI28f6sfrAAIslLCXUYKD5SsPPMWiHYrI= k8s.io/client-go v0.21.2 h1:Q1j4L/iMN4pTw6Y4DWppBoUxgKO8LbffEMVEV00MUp0= k8s.io/client-go v0.21.2/go.mod h1:HdJ9iknWpbl3vMGtib6T2PyI/VYxiZfq936WNVHBRrA= @@ -1825,8 +1336,6 @@ k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod 
h1:FiNAH4ZV3gBg2Kwh89tzAE k8s.io/heapster v1.2.0-beta.1/go.mod h1:h1uhptVXMwC8xtZBYsPXKVi8fpdlYkTs6k949KozGrM= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.3/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= @@ -1836,18 +1345,14 @@ k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM= k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/kube-aggregator v0.21.2/go.mod h1:7NgmUXJziySAJ7GxMRBBwcJay7MLUoxms31fw/ICpYk= k8s.io/kube-controller-manager v0.21.2/go.mod h1:gu0rV2UWy1k05E3kZxJFQE1F7RR1PZlq83+9J+lWlno= -k8s.io/kube-openapi v0.0.0-20190320154901-5e45bb682580/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= -k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e h1:KLHHjkdQFomZy8+06csTWZ0m1343QqxZhR2LJ1OxCYM= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/kube-proxy v0.21.2/go.mod h1:gZXWzR5wi2lVfGeol0yp37rJZVIsCbPWqfeUXSykUUU= k8s.io/kube-scheduler v0.21.2/go.mod h1:uMnMNvgw2EAoujObL1tuJ5+tvj2Pnv3k7i3X069crrs= -k8s.io/kube-state-metrics v1.7.2/go.mod h1:U2Y6DRi07sS85rmVPmBFlmv+2peBcL8IWGjM+IjYA/E= k8s.io/kubectl v0.21.2/go.mod h1:PgeUclpG8VVmmQIl8zpLar3IQEpFc9mrmvlwY3CK1xo= k8s.io/kubelet v0.21.2/go.mod h1:1EqOUgp3BqvMXuZZRIlPDNkpgT5MfbJrpEnS4Gxn/mo= -k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/kubernetes v1.18.19 h1:g4wSMXrB9HRVBZL0IV9ld7B3uUNnMxIa7ogXHYwd/Z8= k8s.io/kubernetes v1.18.19/go.mod h1:2WmqQaRSlUlhX69or0YjnjAYxCkuI9O2Fz9hGog7heg= k8s.io/legacy-cloud-providers v0.21.2/go.mod h1:9dFEf/WGCqPhOIGQiAwcPfgAYWRot6txrCshWCg225c= @@ -1855,9 +1360,6 @@ k8s.io/metrics v0.21.2/go.mod h1:wzlOINZMCtWq8dR9gHlyaOemmYlOpAoldEIXE82gAhI= k8s.io/repo-infra v0.0.1-alpha.1/go.mod h1:wO1t9WaB99V80ljbeENTnayuEEwNZt7gECYh/CEyOJ8= k8s.io/sample-apiserver v0.21.2/go.mod h1:NXFq8jUrB3UyYhoGstFMXdHFSxfHZSHX6cUdVBVZKFM= k8s.io/system-validators v1.0.4/go.mod h1:HgSgTg4NAGNoYYjKsUyk52gdNi2PVDswQ9Iyn66R7NI= -k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= -k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= @@ -1872,24 +1374,17 @@ mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIa mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod 
h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34/go.mod h1:H6SUd1XjIs+qQCyskXg5OFSrilMRUkD8ePJpHKDPaeY= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/letsencrypt v0.0.3/go.mod h1:buyQKZ6IXrRnB7TdkHP0RyEybLx18HHyOSoTyoOLqNY= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/controller-runtime v0.6.0/go.mod h1:CpYf5pdNY/B352A1TFLAS2JVSlnGQ5O2cftPHndTroo= sigs.k8s.io/controller-runtime v0.10.0 h1:HgyZmMpjUOrtkaFtCnfxsR1bGRuFoAczSNbn2MoKj5U= sigs.k8s.io/controller-runtime v0.10.0/go.mod h1:GCdh6kqV6IY4LK0JLwX0Zm6g233RtVGdb/f0+KSfprg= -sigs.k8s.io/controller-tools v0.2.4/go.mod h1:m/ztfQNocGYBgTTCmFdnK94uVvgxeZeE3LtJvd/jIzA= -sigs.k8s.io/controller-tools v0.3.0/go.mod h1:enhtKGfxZD1GFEoMgP8Fdbu+uKQ/cq1/WGJhdVChfvI= -sigs.k8s.io/kubebuilder v1.0.9-0.20200618125005-36aa113dbe99/go.mod h1:FGPx0hvP73+bapzWoy5ePuhAJYgJjrFbPxgvWyortM0= sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= sigs.k8s.io/kustomize/api v0.8.8/go.mod h1:He1zoK0nk43Pc6NlV085xDXDXTNprtcyKZVm3swsdNY= sigs.k8s.io/kustomize/cmd/config v0.9.10/go.mod h1:Mrby0WnRH7hA6OwOYnYpfpiY0WJIMgYrEDfwOeFdMK0= sigs.k8s.io/kustomize/kustomize/v4 v4.1.2/go.mod h1:PxBvo4WGYlCLeRPL+ziT64wBXqbgfcalOS/SXa/tcyo= sigs.k8s.io/kustomize/kyaml v0.10.17/go.mod h1:mlQFagmkm1P+W4lZJbJ/yaxMd8PqMRSC4cPcfUVt5Hg= -sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e h1:4Z09Hglb792X0kfOBBJUPFEyvVfQWrYT/l8h5EKA6JQ= -sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= diff --git a/pkg/aws-ssm/aws-ssm-chaos/environment/environment.go b/pkg/aws-ssm/aws-ssm-chaos/environment/environment.go index 42c9c7c7f..bf5cfedff 100644 --- a/pkg/aws-ssm/aws-ssm-chaos/environment/environment.go +++ b/pkg/aws-ssm/aws-ssm-chaos/environment/environment.go @@ -9,16 +9,14 @@ import ( "github.com/litmuschaos/litmus-go/pkg/types" ) -//GetENV fetches all the env variables from the runner pod +// GetENV fetches all the env variables from the runner pod func GetENV(experimentDetails *experimentTypes.ExperimentDetails, expName string) { experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "") experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus") experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "") - experimentDetails.AuxiliaryAppInfo = types.Getenv("AUXILIARY_APPINFO", "") experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "60")) experimentDetails.ChaosInterval, _ = strconv.Atoi(types.Getenv("CHAOS_INTERVAL", "60")) experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) - experimentDetails.ChaosLib = types.Getenv("LIB", "litmus") experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", "")) experimentDetails.InstanceID = 
types.Getenv("INSTANCE_ID", "") experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "") @@ -34,7 +32,6 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails, expName string experimentDetails.MemoryPercentage, _ = strconv.Atoi(types.Getenv("MEMORY_PERCENTAGE", "80")) experimentDetails.InstallDependencies = types.Getenv("INSTALL_DEPENDENCIES", "True") experimentDetails.Sequence = types.Getenv("SEQUENCE", "parallel") - experimentDetails.TargetContainer = types.Getenv("TARGET_CONTAINER", "") switch expName { case "aws-ssm-chaos-by-tag": experimentDetails.EC2InstanceTag = types.Getenv("EC2_INSTANCE_TAG", "") diff --git a/pkg/aws-ssm/aws-ssm-chaos/types/types.go b/pkg/aws-ssm/aws-ssm-chaos/types/types.go index 7bffd6b1f..3e0a23ea7 100644 --- a/pkg/aws-ssm/aws-ssm-chaos/types/types.go +++ b/pkg/aws-ssm/aws-ssm-chaos/types/types.go @@ -9,8 +9,6 @@ type ExperimentDetails struct { ExperimentName string EngineName string RampTime int - AuxiliaryAppInfo string - ChaosLib string ChaosDuration int ChaosInterval int ChaosUID clientTypes.UID @@ -24,8 +22,6 @@ type ExperimentDetails struct { Region string InstanceAffectedPerc int Sequence string - LIBImagePullPolicy string - TargetContainer string Cpu int NumberOfWorkers int MemoryPercentage int diff --git a/pkg/azure/disk-loss/environment/environment.go b/pkg/azure/disk-loss/environment/environment.go index 436cfd432..ee48cfac5 100644 --- a/pkg/azure/disk-loss/environment/environment.go +++ b/pkg/azure/disk-loss/environment/environment.go @@ -2,6 +2,7 @@ package environment import ( "strconv" + "strings" clientTypes "k8s.io/apimachinery/pkg/types" @@ -9,17 +10,14 @@ import ( "github.com/litmuschaos/litmus-go/pkg/types" ) -//GetENV fetches all the env variables from the runner pod +// GetENV fetches all the env variables from the runner pod func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "azure-disk-loss") experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus") experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "") - experimentDetails.AuxiliaryAppInfo = types.Getenv("AUXILIARY_APPINFO", "") - experimentDetails.TargetContainer = types.Getenv("TARGET_CONTAINER", "") experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "30")) experimentDetails.ChaosInterval, _ = strconv.Atoi(types.Getenv("CHAOS_INTERVAL", "30")) experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) - experimentDetails.ChaosLib = types.Getenv("LIB", "litmus") experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", "")) experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "") experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "") @@ -27,6 +25,6 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.Timeout, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_TIMEOUT", "180")) experimentDetails.ScaleSet = types.Getenv("SCALE_SET", "disable") experimentDetails.ResourceGroup = types.Getenv("RESOURCE_GROUP", "") - experimentDetails.VirtualDiskNames = types.Getenv("VIRTUAL_DISK_NAMES", "") + experimentDetails.VirtualDiskNames = strings.TrimSpace(types.Getenv("VIRTUAL_DISK_NAMES", "")) experimentDetails.Sequence = types.Getenv("SEQUENCE", "parallel") } diff --git a/pkg/azure/disk-loss/types/types.go b/pkg/azure/disk-loss/types/types.go index 453647c4f..fab9c6bd7 100644 --- a/pkg/azure/disk-loss/types/types.go +++ b/pkg/azure/disk-loss/types/types.go @@ -6,24 
+6,20 @@ import ( // ExperimentDetails is for collecting all the experiment-related details type ExperimentDetails struct { - ExperimentName string - EngineName string - ChaosDuration int - ChaosInterval int - RampTime int - AuxiliaryAppInfo string - ChaosLib string - ChaosUID clientTypes.UID - InstanceID string - ChaosNamespace string - ChaosPodName string - TargetContainer string - Timeout int - Delay int - LIBImagePullPolicy string - ScaleSet string - ResourceGroup string - SubscriptionID string - VirtualDiskNames string - Sequence string + ExperimentName string + EngineName string + ChaosDuration int + ChaosInterval int + RampTime int + ChaosUID clientTypes.UID + InstanceID string + ChaosNamespace string + ChaosPodName string + Timeout int + Delay int + ScaleSet string + ResourceGroup string + SubscriptionID string + VirtualDiskNames string + Sequence string } diff --git a/pkg/azure/instance-stop/environment/environment.go b/pkg/azure/instance-stop/environment/environment.go index fda2b5b09..8eb13b51e 100644 --- a/pkg/azure/instance-stop/environment/environment.go +++ b/pkg/azure/instance-stop/environment/environment.go @@ -2,6 +2,7 @@ package environment import ( "strconv" + "strings" clientTypes "k8s.io/apimachinery/pkg/types" @@ -14,21 +15,15 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "azure-instance-stop") experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus") experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "") - experimentDetails.AppNS = types.Getenv("APP_NAMESPACE", "") - experimentDetails.AppLabel = types.Getenv("APP_LABEL", "") - experimentDetails.AppKind = types.Getenv("APP_KIND", "") - experimentDetails.AuxiliaryAppInfo = types.Getenv("AUXILIARY_APPINFO", "") - experimentDetails.TargetContainer = types.Getenv("TARGET_CONTAINER", "") experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "30")) experimentDetails.ChaosInterval, _ = strconv.Atoi(types.Getenv("CHAOS_INTERVAL", "30")) experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) - experimentDetails.ChaosLib = types.Getenv("LIB", "litmus") experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", "")) experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "") experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "") experimentDetails.Delay, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_DELAY", "2")) experimentDetails.Timeout, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_TIMEOUT", "180")) - experimentDetails.AzureInstanceNames = types.Getenv("AZURE_INSTANCE_NAMES", "") + experimentDetails.AzureInstanceNames = strings.TrimSpace(types.Getenv("AZURE_INSTANCE_NAMES", "")) experimentDetails.ResourceGroup = types.Getenv("RESOURCE_GROUP", "") experimentDetails.ScaleSet = types.Getenv("SCALE_SET", "disable") experimentDetails.Sequence = types.Getenv("SEQUENCE", "parallel") diff --git a/pkg/azure/instance-stop/types/types.go b/pkg/azure/instance-stop/types/types.go index 63a0cd3f8..921798101 100644 --- a/pkg/azure/instance-stop/types/types.go +++ b/pkg/azure/instance-stop/types/types.go @@ -9,24 +9,17 @@ type ExperimentDetails struct { ExperimentName string EngineName string RampTime int - AppNS string - AppLabel string - AppKind string - AuxiliaryAppInfo string - ChaosLib string ChaosDuration int ChaosInterval int ChaosUID clientTypes.UID InstanceID string ChaosNamespace string ChaosPodName string - TargetContainer string Timeout int 
Delay int AzureInstanceNames string ResourceGroup string SubscriptionID string ScaleSet string - LIBImagePullPolicy string Sequence string } diff --git a/pkg/baremetal/redfish-node-restart/environment/environment.go b/pkg/baremetal/redfish-node-restart/environment/environment.go index af77152f2..bafe960ae 100644 --- a/pkg/baremetal/redfish-node-restart/environment/environment.go +++ b/pkg/baremetal/redfish-node-restart/environment/environment.go @@ -16,7 +16,6 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "") experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "30")) experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) - experimentDetails.ChaosLib = types.Getenv("LIB", "litmus") experimentDetails.TargetContainer = types.Getenv("TARGET_CONTAINER", "") experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", "")) experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "") diff --git a/pkg/baremetal/redfish-node-restart/types/types.go b/pkg/baremetal/redfish-node-restart/types/types.go index ee45fc72f..e7575b398 100644 --- a/pkg/baremetal/redfish-node-restart/types/types.go +++ b/pkg/baremetal/redfish-node-restart/types/types.go @@ -6,21 +6,19 @@ import ( // ExperimentDetails is for collecting all the experiment-related details type ExperimentDetails struct { - ExperimentName string - EngineName string - ChaosDuration int - RampTime int - ChaosLib string - TargetContainer string - ChaosUID clientTypes.UID - InstanceID string - ChaosNamespace string - ChaosPodName string - AuxiliaryAppInfo string - Timeout int - Delay int - LIBImagePullPolicy string - IPMIIP string - User string - Password string + ExperimentName string + EngineName string + ChaosDuration int + RampTime int + TargetContainer string + ChaosUID clientTypes.UID + InstanceID string + ChaosNamespace string + ChaosPodName string + AuxiliaryAppInfo string + Timeout int + Delay int + IPMIIP string + User string + Password string } diff --git a/pkg/baremetal/redfish/redfish.go b/pkg/baremetal/redfish/redfish.go index 5fb5ca194..8bf5271c4 100644 --- a/pkg/baremetal/redfish/redfish.go +++ b/pkg/baremetal/redfish/redfish.go @@ -9,16 +9,16 @@ import ( "fmt" "net/http" + "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/log" - "github.com/pkg/errors" ) -//State helps get the power state of the node +// State helps get the power state of the node type State struct { PowerState string } -//GetNodeStatus will check and return the status of the node. +// GetNodeStatus will check and return the status of the node. 
func GetNodeStatus(IP, user, password string) (string, error) { URL := fmt.Sprintf("https://%v/redfish/v1/Systems/System.Embedded.1/", IP) auth := user + ":" + password @@ -27,16 +27,13 @@ func GetNodeStatus(IP, user, password string) (string, error) { json_data, _ := json.Marshal(data) req, err := http.NewRequest("GET", URL, bytes.NewBuffer(json_data)) if err != nil { - msg := fmt.Sprintf("Error creating http request: %v", err) - log.Error(msg) - return "", errors.Errorf("fail to get the node status, err: %v", err) + log.Errorf("Error creating HTTP get request, err: %v", err) + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Reason: fmt.Sprintf("failed to get the node status, err: %v", err)} } req.Header.Add("Authorization", "Basic "+encodedAuth) req.Header.Add("Content-Type", "application/json") req.Header.Add("Accept", "*/*") - tr := &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - } + tr := &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}} client := &http.Client{Transport: tr} resp, err := client.Do(req) if err != nil { @@ -46,7 +43,7 @@ func GetNodeStatus(IP, user, password string) (string, error) { log.Infof(resp.Status) if resp.StatusCode != 200 { log.Error("Unable to get current state of the node") - return "", errors.Errorf("fail to get the node status. Request failed with status: %v", resp.StatusCode) + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Reason: fmt.Sprintf("failed to get the node status. Request failed with status: %v", resp.StatusCode)} } defer resp.Body.Close() power := new(State) @@ -54,7 +51,7 @@ func GetNodeStatus(IP, user, password string) (string, error) { return power.PowerState, nil } -//RebootNode triggers hard reset on the target baremetal node +// RebootNode triggers hard reset on the target baremetal node func RebootNode(URL, user, password string) error { data := map[string]string{"ResetType": "ForceRestart"} json_data, err := json.Marshal(data) @@ -62,13 +59,12 @@ func RebootNode(URL, user, password string) error { encodedAuth := base64.StdEncoding.EncodeToString([]byte(auth)) if err != nil { log.Error(err.Error()) - return errors.New("Unable to encode the authentication credentials") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Reason: fmt.Sprintf("unable to encode the authentication credentials, err: %v", err)} } req, err := http.NewRequest("POST", URL, bytes.NewBuffer(json_data)) if err != nil { - msg := fmt.Sprintf("Error creating http request: %v", err) - log.Error(msg) - return errors.New(msg) + log.Errorf("Error creating HTTP post request, err: %v", err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Reason: fmt.Sprintf("error creating http post request, err: %v", err)} } req.Header.Add("Authorization", "Basic "+encodedAuth) req.Header.Add("Content-Type", "application/json") @@ -79,13 +75,12 @@ func RebootNode(URL, user, password string) error { client := &http.Client{Transport: tr} resp, err := client.Do(req) if err != nil { - msg := fmt.Sprintf("Error creating post request: %v", err) - log.Error(msg) - return errors.New(msg) + log.Errorf("Error creating HTTP post request, err: %v", err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Reason: fmt.Sprintf("error creating http post request, err: %v", err)} } log.Infof(resp.Status) - if resp.StatusCode >= 400 && resp.StatusCode < 200 { - return errors.New("Failed to trigger node restart") + if resp.StatusCode >= 400 || resp.StatusCode < 200 { + return
cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Reason: fmt.Sprintf("failed to trigger node restart, received http status code %v", resp.StatusCode)} } defer resp.Body.Close() return nil diff --git a/pkg/cassandra/liveness.go b/pkg/cassandra/liveness.go index f39e4414c..0888528d1 100644 --- a/pkg/cassandra/liveness.go +++ b/pkg/cassandra/liveness.go @@ -2,7 +2,8 @@ package cassandra import ( "context" - "io/ioutil" + "fmt" + "io" "net/http" "strconv" "strings" @@ -11,6 +12,7 @@ import ( "github.com/pkg/errors" experimentTypes "github.com/litmuschaos/litmus-go/pkg/cassandra/pod-delete/types" + "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/status" @@ -41,13 +43,13 @@ func LivenessCheck(experimentsDetails *experimentTypes.ExperimentDetails, client // Checking the status of liveness deployment pod log.Info("[Status]: Checking the status of the cassandra liveness pod") if err := status.CheckApplicationStatusesByLabels(experimentsDetails.ChaoslibDetail.AppNS, "name=cassandra-liveness-deploy-"+experimentsDetails.RunID, experimentsDetails.ChaoslibDetail.Timeout, experimentsDetails.ChaoslibDetail.Delay, clients); err != nil { - return "", errors.Errorf("liveness pod is not in running state, err: %v", err) + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Reason: fmt.Sprintf("liveness pod is not in running state, %s", err.Error())} } // Record cassandra liveness pod resource version ResourceVersionBefore, err := GetLivenessPodResourceVersion(experimentsDetails, clients) if err != nil { - return ResourceVersionBefore, errors.Errorf("failed to get the pod resource version, err: %v", err) + return ResourceVersionBefore, cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Reason: fmt.Sprintf("failed to get the pod resource version, %s", err.Error())} } return ResourceVersionBefore, nil @@ -61,13 +63,13 @@ func LivenessCleanup(experimentsDetails *experimentTypes.ExperimentDetails, clie log.Info("[CleanUP]: Getting ClusterIP of liveness service") ClusterIP, err := GetServiceClusterIP(experimentsDetails, clients) if err != nil { - return errors.Errorf("failed to get the ClusterIP of liveness service, err: %v", err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("failed to get the ClusterIP of liveness service, %s", err.Error())} } // Record cassandra liveness pod resource version after chaos ResourceVersionAfter, err := GetLivenessPodResourceVersion(experimentsDetails, clients) if err != nil { - return errors.Errorf("failed to get the pod resource version, err: %v", err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("failed to get the pod resource version, %s", err.Error())} } if err = ResourceVersionCheck(ResourceVersionBefore, ResourceVersionAfter); err != nil { @@ -75,15 +77,15 @@ func LivenessCleanup(experimentsDetails *experimentTypes.ExperimentDetails, clie } if err = WaitTillCycleComplete(experimentsDetails, ClusterIP); err != nil { - return errors.Errorf("cycle complete test failed, err: %v", err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("cycle complete test failed, %s", err.Error())} } log.Info("[Cleanup]: Deleting cassandra liveness deployment & service") if err = DeleteLivenessDeployment(experimentsDetails, clients); err != nil { - return errors.Errorf("liveness deployment deletion failed, err: %v", err) + return 
cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("liveness deployment deletion failed, %s", err.Error())} } if err = DeleteLivenessService(experimentsDetails, clients); err != nil { - return errors.Errorf("liveness service deletion failed, err: %v", err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("liveness service deletion failed, %s", err.Error())} } log.Info("[Cleanup]: Cassandra liveness service has been deleted successfully") @@ -96,9 +98,9 @@ func GetLivenessPodResourceVersion(experimentsDetails *experimentTypes.Experimen livenessPods, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaoslibDetail.AppNS).List(context.Background(), metav1.ListOptions{LabelSelector: "name=cassandra-liveness-deploy-" + experimentsDetails.RunID}) if err != nil { - return "", errors.Errorf("unable to get the liveness pod, err: %v", err) + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("failed to get the liveness pod, %s", err.Error())} } else if len(livenessPods.Items) == 0 { - return "", errors.Errorf("No liveness pod found with matching labels") + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: "no liveness pod found with matching labels"} } ResourceVersion := livenessPods.Items[0].ResourceVersion @@ -110,7 +112,7 @@ func GetServiceClusterIP(experimentsDetails *experimentTypes.ExperimentDetails, service, err := clients.KubeClient.CoreV1().Services(experimentsDetails.ChaoslibDetail.AppNS).Get(context.Background(), "cassandra-liveness-service-"+experimentsDetails.RunID, metav1.GetOptions{}) if err != nil { - return "", err + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("failed to fetch the liveness service, %s", err.Error())} } return service.Spec.ClusterIP, nil @@ -130,12 +132,12 @@ func WaitTillCycleComplete(experimentsDetails *experimentTypes.ExperimentDetails Try(func(attempt uint) error { response, err := http.Get(URL) if err != nil { - return errors.Errorf("the HTTP request failed with error %s", err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Reason: fmt.Sprintf("the HTTP request failed with error %s", err)} } - data, _ := ioutil.ReadAll(response.Body) + data, _ := io.ReadAll(response.Body) if !strings.Contains(string(data), "CycleComplete") { log.Info("[Verification]: Wait for liveness pod to come in CycleComplete state") - return errors.Errorf("livenss pod is not in completed state") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Reason: "liveness pod is not in completed state"} } log.Info("Liveness pod comes to CycleComplete state") return nil @@ -146,7 +148,7 @@ func WaitTillCycleComplete(experimentsDetails *experimentTypes.ExperimentDetails func ResourceVersionCheck(ResourceVersionBefore, ResourceVersionAfter string) error { if ResourceVersionBefore != ResourceVersionAfter { - return errors.Errorf("resource version check failed, Resource version remains same") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: "liveness pod failed as target pod is unhealthy"} } log.Info("The cassandra cluster is active") @@ -168,9 +170,9 @@ func DeleteLivenessDeployment(experimentsDetails *experimentTypes.ExperimentDeta Try(func(attempt uint) error { podSpec, err := clients.KubeClient.AppsV1().Deployments(experimentsDetails.ChaoslibDetail.AppNS).List(context.Background(), metav1.ListOptions{LabelSelector: "name=cassandra-liveness-deploy-" + experimentsDetails.RunID}) if err != nil { - return
errors.Errorf("liveness deployment is not deleted yet, err: %v", err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Reason: fmt.Sprintf("liveness deployment is not deleted yet, %s", err.Error())} } else if len(podSpec.Items) != 0 { - return errors.Errorf("liveness deployment is not deleted yet") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Reason: "liveness pod is not deleted yet"} } return nil }) @@ -183,7 +185,7 @@ func DeleteLivenessService(experimentsDetails *experimentTypes.ExperimentDetails if err := clients.KubeClient.CoreV1().Services(experimentsDetails.ChaoslibDetail.AppNS).Delete(context.Background(), "cassandra-liveness-service-"+experimentsDetails.RunID, metav1.DeleteOptions{ PropagationPolicy: &deletePolicy, }); err != nil { - return errors.Errorf("fail to delete liveness service, err: %v", err) + return errors.Errorf("fail to delete liveness service, %s", err.Error()) } return retry. Times(uint(experimentsDetails.ChaoslibDetail.Timeout / experimentsDetails.ChaoslibDetail.Delay)). @@ -191,9 +193,9 @@ func DeleteLivenessService(experimentsDetails *experimentTypes.ExperimentDetails Try(func(attempt uint) error { svc, err := clients.KubeClient.CoreV1().Services(experimentsDetails.ChaoslibDetail.AppNS).List(context.Background(), metav1.ListOptions{LabelSelector: "name=cassandra-liveness-service-" + experimentsDetails.RunID}) if err != nil { - return errors.Errorf("liveness service is not deleted yet, err: %v", err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Reason: fmt.Sprintf("liveness service is not deleted yet, %s", err.Error())} } else if len(svc.Items) != 0 { - return errors.Errorf("liveness service is not deleted yet") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Reason: "liveness service is not deleted yet"} } return nil }) @@ -328,7 +330,7 @@ func CreateLivenessPod(experimentsDetails *experimentTypes.ExperimentDetails, cl // Creating liveness deployment _, err := clients.KubeClient.AppsV1().Deployments(experimentsDetails.ChaoslibDetail.AppNS).Create(context.Background(), liveness, metav1.CreateOptions{}) if err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{deploymentName: %s, namespace: %s}", liveness.Name, liveness.Namespace), Reason: fmt.Sprintf("unable to create liveness deployment, %s", err.Error())} } log.Info("Liveness Deployment Created successfully!") return nil @@ -366,7 +368,7 @@ func CreateLivenessService(experimentsDetails *experimentTypes.ExperimentDetails // Creating liveness service _, err := clients.KubeClient.CoreV1().Services(experimentsDetails.ChaoslibDetail.AppNS).Create(context.Background(), livenessSvc, metav1.CreateOptions{}) if err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{serviceName: %s, namespace: %s}", livenessSvc.Name, livenessSvc.Namespace), Reason: fmt.Sprintf("unable to create liveness service, %s", err.Error())} } log.Info("Liveness service created successfully!") diff --git a/pkg/cassandra/node-tools.go b/pkg/cassandra/node-tools.go index 2580c357e..3e6bf0ed0 100644 --- a/pkg/cassandra/node-tools.go +++ b/pkg/cassandra/node-tools.go @@ -2,8 +2,10 @@ package cassandra import ( "context" + "fmt" "strings" + "github.com/litmuschaos/litmus-go/pkg/cerrors" litmusexec "github.com/litmuschaos/litmus-go/pkg/utils/exec" "github.com/pkg/errors" @@ -43,25 +45,25 @@ func NodeToolStatusCheck(experimentsDetails *experimentTypes.ExperimentDetails, 
return nil } -//GetApplicationPodName will return the name of first application pod +// GetApplicationPodName will return the name of first application pod func GetApplicationPodName(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets) (string, error) { podList, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaoslibDetail.AppNS).List(context.Background(), metav1.ListOptions{LabelSelector: experimentsDetails.ChaoslibDetail.AppLabel}) if err != nil { - return "", errors.Errorf("failed to get the application pod in %v namespace, err: %v", experimentsDetails.ChaoslibDetail.AppNS, err) + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("failed to get the application pod in %v namespace, err: %v", experimentsDetails.ChaoslibDetail.AppNS, err)} } else if len(podList.Items) == 0 { - return "", errors.Errorf("failed to get the application pod in %v namespace", experimentsDetails.ChaoslibDetail.AppNS) + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("failed to get the application pod in %v namespace", experimentsDetails.ChaoslibDetail.AppNS)} } return podList.Items[0].Name, nil } -//GetApplicationReplicaCount will return the replica count of the sts application +// GetApplicationReplicaCount will return the replica count of the sts application func GetApplicationReplicaCount(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets) (int, error) { podList, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaoslibDetail.AppNS).List(context.Background(), metav1.ListOptions{LabelSelector: experimentsDetails.ChaoslibDetail.AppLabel}) if err != nil { - return 0, errors.Errorf("failed to get the application pod in %v namespace, err: %v", experimentsDetails.ChaoslibDetail.AppNS, err) + return 0, cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("failed to get the application pod in %v namespace, err: %v", experimentsDetails.ChaoslibDetail.AppNS, err)} } else if len(podList.Items) == 0 { - return 0, errors.Errorf("failed to get the application pod in %v namespace", experimentsDetails.ChaoslibDetail.AppNS) + return 0, cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("failed to get the application pod in %v namespace", experimentsDetails.ChaoslibDetail.AppNS)} } return len(podList.Items), nil } @@ -72,13 +74,13 @@ func CheckLoadPercentage(loadPercentage []string, replicaCount int) error { // It will make sure that the replica have some load // It will fail if replica has 0% load if len(loadPercentage) != replicaCount { - return errors.Errorf("failed to get the load on every replica") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: "failed to get the load on all of the replicas"} } for count := 0; count < len(loadPercentage); count++ { if loadPercentage[count] == "0%" || loadPercentage[count] == "" { - return errors.Errorf("the load distribution percentage failed, as its value is: '%v'", loadPercentage[count]) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("the load distribution percentage failed, as its value is: '%v'", loadPercentage[count])} } } log.Info("[Check]: Load is distributed over all the replica of cassandra") @@ -96,7 +98,7 @@ func GetLoadDistribution(experimentsDetails *experimentTypes.ExperimentDetails, litmusexec.SetExecCommandAttributes(&execCommandDetails, targetPod, "cassandra", experimentsDetails.ChaoslibDetail.AppNS) response, err := 
litmusexec.Exec(&execCommandDetails, clients, command) if err != nil { - return nil, errors.Errorf("unable to get nodetool status details, err: %v", err) + return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("unable to get nodetool status details, err: %v", err)} } split := strings.Split(response, "\n") loadPercentage := split[:len(split)-1] diff --git a/pkg/cassandra/pod-delete/environment/environment.go b/pkg/cassandra/pod-delete/environment/environment.go index ab16746a8..b081cc714 100644 --- a/pkg/cassandra/pod-delete/environment/environment.go +++ b/pkg/cassandra/pod-delete/environment/environment.go @@ -20,7 +20,6 @@ func GetENV(cassandraDetails *cassandraTypes.ExperimentDetails) { ChaoslibDetail.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "30")) ChaoslibDetail.ChaosInterval = types.Getenv("CHAOS_INTERVAL", "10") ChaoslibDetail.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) - ChaoslibDetail.ChaosLib = types.Getenv("LIB", "litmus") ChaoslibDetail.ChaosServiceAccount = types.Getenv("CHAOS_SERVICE_ACCOUNT", "") ChaoslibDetail.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", "")) ChaoslibDetail.InstanceID = types.Getenv("INSTANCE_ID", "") diff --git a/pkg/cerrors/custom_errors.go b/pkg/cerrors/custom_errors.go new file mode 100644 index 000000000..5019a73b7 --- /dev/null +++ b/pkg/cerrors/custom_errors.go @@ -0,0 +1,105 @@ +package cerrors + +import ( + "encoding/json" + + "github.com/palantir/stacktrace" +) + +type ErrorType string + +const ( + ErrorTypeNonUserFriendly ErrorType = "NON_USER_FRIENDLY_ERROR" + ErrorTypeGeneric ErrorType = "GENERIC_ERROR" + ErrorTypeChaosResultCRUD ErrorType = "CHAOS_RESULT_CRUD_ERROR" + ErrorTypeStatusChecks ErrorType = "STATUS_CHECKS_ERROR" + ErrorTypeTargetSelection ErrorType = "TARGET_SELECTION_ERROR" + ErrorTypeExperimentAborted ErrorType = "EXPERIMENT_ABORTED" + ErrorTypeHelper ErrorType = "HELPER_ERROR" + ErrorTypeHelperPodFailed ErrorType = "HELPER_POD_FAILED_ERROR" + ErrorTypeContainerRuntime ErrorType = "CONTAINER_RUNTIME_ERROR" + ErrorTypeChaosInject ErrorType = "CHAOS_INJECT_ERROR" + ErrorTypeChaosRevert ErrorType = "CHAOS_REVERT_ERROR" + ErrorTypeK8sProbe ErrorType = "K8S_PROBE_ERROR" + ErrorTypeCmdProbe ErrorType = "CMD_PROBE_ERROR" + ErrorTypeHttpProbe ErrorType = "HTTP_PROBE_ERROR" + ErrorTypePromProbe ErrorType = "PROM_PROBE_ERROR" +) + +type userFriendly interface { + UserFriendly() bool + ErrorType() ErrorType +} + +// IsUserFriendly returns true if err is marked as safe to present to failstep +func IsUserFriendly(err error) bool { + ufe, ok := err.(userFriendly) + return ok && ufe.UserFriendly() +} + +// GetErrorType returns the type of error if the error is user-friendly +func GetErrorType(err error) ErrorType { + if ufe, ok := err.(userFriendly); ok { + return ufe.ErrorType() + } + return ErrorTypeNonUserFriendly +} + +func GetRootCauseAndErrorCode(err error, phase string) (string, ErrorType) { + rootCause := stacktrace.RootCause(err) + errorType := GetErrorType(rootCause) + if !IsUserFriendly(rootCause) { + return err.Error(), errorType + } + if error, ok := rootCause.(Error); ok { + if error.Phase == "" { + error.Phase = phase + } + return error.Error(), errorType + } + return rootCause.Error(), errorType +} + +type Error struct { + Source string `json:"source,omitempty"` + ErrorCode ErrorType `json:"errorCode,omitempty"` + Phase string `json:"phase,omitempty"` + Reason string `json:"reason,omitempty"` + Target string `json:"target,omitempty"` +} + +func (e 
Error) Error() string { + return convertToJson(e) +} + +func (e Error) UserFriendly() bool { + return true +} + +func (e Error) ErrorType() ErrorType { + return e.ErrorCode +} + +func convertToJson(v interface{}) string { + vStr, err := json.Marshal(v) + if err != nil { + return err.Error() + } + return string(vStr) +} + +type PreserveError struct { + ErrString string +} + +func (pe PreserveError) Error() string { + return pe.ErrString +} + +func (pe PreserveError) UserFriendly() bool { + return true +} + +func (pe PreserveError) ErrorType() ErrorType { + return ErrorTypeGeneric +} diff --git a/pkg/cloud/aws/ebs/ebs-operations.go b/pkg/cloud/aws/ebs/ebs-operations.go index 1d43f2a57..b5722b195 100644 --- a/pkg/cloud/aws/ebs/ebs-operations.go +++ b/pkg/cloud/aws/ebs/ebs-operations.go @@ -1,14 +1,15 @@ package aws import ( + "fmt" "strings" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" + "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/cloud/aws/common" experimentTypes "github.com/litmuschaos/litmus-go/pkg/kube-aws/ebs-loss/types" "github.com/litmuschaos/litmus-go/pkg/log" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -26,10 +27,14 @@ func EBSVolumeDetach(ebsVolumeID, region string) error { result, err := ec2Svc.DetachVolume(input) if err != nil { - return common.CheckAWSError(err) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosInject, + Reason: fmt.Sprintf("failed to detach volume: %v", common.CheckAWSError(err).Error()), + Target: fmt.Sprintf("{EBS Volume ID: %v, Region: %v}", ebsVolumeID, region), + } } - log.InfoWithValues("Detaching ebs having:", logrus.Fields{ + log.InfoWithValues("Detaching EBS having:", logrus.Fields{ "VolumeId": *result.VolumeId, "State": *result.State, "Device": *result.Device, @@ -57,10 +62,14 @@ func EBSVolumeAttach(ebsVolumeID, ec2InstanceID, deviceName, region string) erro result, err := ec2Svc.AttachVolume(input) if err != nil { - return common.CheckAWSError(err) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosRevert, + Reason: fmt.Sprintf("failed to attach volume: %v", common.CheckAWSError(err).Error()), + Target: fmt.Sprintf("{EBS Volume ID: %v, Region: %v}", ebsVolumeID, region), + } } - log.InfoWithValues("Attaching ebs having:", logrus.Fields{ + log.InfoWithValues("Attaching EBS having:", logrus.Fields{ "VolumeId": *result.VolumeId, "State": *result.State, "Device": *result.Device, @@ -69,7 +78,7 @@ func EBSVolumeAttach(ebsVolumeID, ec2InstanceID, deviceName, region string) erro return nil } -//SetTargetVolumeIDs will filter out the volume under chaos +// SetTargetVolumeIDs will filter out the volume under chaos func SetTargetVolumeIDs(experimentsDetails *experimentTypes.ExperimentDetails) error { sess := common.GetAWSSession(experimentsDetails.Region) @@ -78,7 +87,11 @@ func SetTargetVolumeIDs(experimentsDetails *experimentTypes.ExperimentDetails) e ec2Svc := ec2.New(sess) res, err := ec2Svc.DescribeVolumes(params) if err != nil { - return errors.Errorf("fail to describe the volumes of given tag, err: %v", err.Error()) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeTargetSelection, + Reason: fmt.Sprintf("failed to fetch the volumes with the given tag: %v", err), + Target: fmt.Sprintf("{EBS Volume Tag: %v, Region: %v}", experimentsDetails.VolumeTag, experimentsDetails.Region), + } } for _, volumeDetails := range res.Volumes { if *volumeDetails.State == "in-use" { @@ -87,7 +100,11 @@ func SetTargetVolumeIDs(experimentsDetails 
*experimentTypes.ExperimentDetails) e } if len(experimentsDetails.TargetVolumeIDList) == 0 { - return errors.Errorf("fail to get any attaced volumes to detach using tag: %v", experimentsDetails.VolumeTag) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeTargetSelection, + Reason: "no attached volumes found", + Target: fmt.Sprintf("{EBS Volume Tag: %v, Region: %v}", experimentsDetails.VolumeTag, experimentsDetails.Region), + } } log.InfoWithValues("[Info]: Targeting the attached volumes,", logrus.Fields{ @@ -98,7 +115,7 @@ func SetTargetVolumeIDs(experimentsDetails *experimentTypes.ExperimentDetails) e return nil } -//GetVolumeAttachmentDetails will give the attachment information of the ebs volume +// GetVolumeAttachmentDetails will give the attachment information of the ebs volume func GetVolumeAttachmentDetails(volumeID, volumeTag, region string) (string, string, error) { sess := common.GetAWSSession(region) @@ -107,18 +124,26 @@ func GetVolumeAttachmentDetails(volumeID, volumeTag, region string) (string, str param := getVolumeFilter(volumeTag) res, err := ec2Svc.DescribeVolumes(param) if err != nil { - return "", "", errors.Errorf("fail to describe the volumes of given tag, err: %v", err.Error()) + return "", "", cerrors.Error{ + ErrorCode: cerrors.ErrorTypeTargetSelection, + Reason: fmt.Sprintf("failed to fetch the volumes with the given tag: %v", err), + Target: fmt.Sprintf("{EBS Volume Tag: %v, Region: %v}", volumeTag, region), + } } for _, volumeDetails := range res.Volumes { if *volumeDetails.VolumeId == volumeID { - //As the first iteam of the attachment list contains the attachment details + //As the first item of the attachment list contains the attachment details return *volumeDetails.Attachments[0].InstanceId, *volumeDetails.Attachments[0].Device, nil } } - return "", "", errors.Errorf("no attachment details found for the given volumeID: %v", volumeID) + return "", "", cerrors.Error{ + ErrorCode: cerrors.ErrorTypeTargetSelection, + Reason: "no attachment details found for the given volumeID", + Target: fmt.Sprintf("{EBS Volume ID: %v, Region: %v}", volumeID, region), + } } -//getVolumeFilter will set a filter and return to get the volume with a given tag +// getVolumeFilter will set a filter and return to get the volume with a given tag func getVolumeFilter(ebsVolumeTag string) *ec2.DescribeVolumesInput { if ebsVolumeTag != "" { volumeTag := strings.Split(ebsVolumeTag, ":") diff --git a/pkg/cloud/aws/ebs/ebs-volume-state.go b/pkg/cloud/aws/ebs/ebs-volume-state.go index d49bd307b..c2b104b82 100644 --- a/pkg/cloud/aws/ebs/ebs-volume-state.go +++ b/pkg/cloud/aws/ebs/ebs-volume-state.go @@ -1,36 +1,41 @@ package aws import ( + "fmt" "strings" "time" "github.com/aws/aws-sdk-go/service/ec2" + "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/cloud/aws/common" experimentTypes "github.com/litmuschaos/litmus-go/pkg/kube-aws/ebs-loss/types" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/utils/retry" - "github.com/pkg/errors" + "github.com/palantir/stacktrace" "github.com/sirupsen/logrus" ) // WaitForVolumeDetachment will wait the ebs volume to completely detach func WaitForVolumeDetachment(ebsVolumeID, ec2InstanceID, region string, delay, timeout int) error { - - log.Info("[Status]: Checking ebs volume status for detachment") + log.Info("[Status]: Checking EBS volume status for detachment") return retry. Times(uint(timeout / delay)). Wait(time.Duration(delay) * time.Second). 
Try(func(attempt uint) error { volumeState, err := GetEBSStatus(ebsVolumeID, ec2InstanceID, region) if err != nil { - return errors.Errorf("failed to get the volume state") + return stacktrace.Propagate(err, "failed to get the volume state") } // We are checking the the attached state as well here as in case of PVs the volume may get attached itself // To check if all the volumes have undergone detachment process we make use of CheckEBSDetachmentInitialisation // TODO: Need to check an optimised approach to do this using apis. if volumeState != "detached" && volumeState != "attached" { log.Infof("[Info]: The volume state is %v", volumeState) - return errors.Errorf("volume is not yet in detached state") + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosInject, + Reason: "volume is not detached within timeout", + Target: fmt.Sprintf("{EBS Volume ID: %v, EC2 Instance ID: %v, Region: %v}", ebsVolumeID, ec2InstanceID, region), + } } log.Infof("[Info]: The volume state is %v", volumeState) return nil @@ -39,8 +44,7 @@ func WaitForVolumeDetachment(ebsVolumeID, ec2InstanceID, region string, delay, t // WaitForVolumeAttachment will wait for the ebs volume to get attached on ec2 instance func WaitForVolumeAttachment(ebsVolumeID, ec2InstanceID, region string, delay, timeout int) error { - - log.Info("[Status]: Checking ebs volume status for attachment") + log.Info("[Status]: Checking EBS volume status for attachment") return retry. Times(uint(timeout / delay)). Wait(time.Duration(delay) * time.Second). @@ -48,18 +52,21 @@ func WaitForVolumeAttachment(ebsVolumeID, ec2InstanceID, region string, delay, t volumeState, err := GetEBSStatus(ebsVolumeID, ec2InstanceID, region) if err != nil { - return errors.Errorf("failed to get the volume status") + return stacktrace.Propagate(err, "failed to get the volume status") } if volumeState != "attached" { log.Infof("[Info]: The volume state is %v", volumeState) - return errors.Errorf("volume is not yet in attached state") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, + Reason: "volume is not attached within timeout", + Target: fmt.Sprintf("{EBS Volume ID: %v, EC2 Instance ID: %v, Region: %v}", ebsVolumeID, ec2InstanceID, region), + } } log.Infof("[Info]: The volume state is %v", volumeState) return nil }) } -//GetEBSStatus will verify and give the ec2 instance details along with ebs volume details. +// GetEBSStatus will verify and give the ec2 instance details along with ebs volume details. 
func GetEBSStatus(ebsVolumeID, ec2InstanceID, region string) (string, error) { // Load session from shared config @@ -72,7 +79,11 @@ func GetEBSStatus(ebsVolumeID, ec2InstanceID, region string) (string, error) { // Call to get detailed information on each instance result, err := ec2Svc.DescribeVolumes(input) if err != nil { - return "", common.CheckAWSError(err) + return "", cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: fmt.Sprintf("failed to get EBS volume status: %v", common.CheckAWSError(err).Error()), + Target: fmt.Sprintf("{EBS Volume ID: %v, EC2 Instance ID: %v, Region: %v}", ebsVolumeID, ec2InstanceID, region), + } } for _, volumeDetails := range result.Volumes { @@ -96,30 +107,42 @@ func GetEBSStatus(ebsVolumeID, ec2InstanceID, region string) (string, error) { return "detached", nil } } - return "", errors.Errorf("unable to find the ebs volume with volumeId %v", ebsVolumeID) + return "", cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: "unable to find the EBS volume", + Target: fmt.Sprintf("{EBS Volume ID: %v, EC2 Instance ID: %v, Region: %v}", ebsVolumeID, ec2InstanceID, region), + } } -//EBSStateCheckByID will check the attachment state of the given volume +// EBSStateCheckByID will check the attachment state of the given volume func EBSStateCheckByID(volumeIDs, region string) error { volumeIDList := strings.Split(volumeIDs, ",") if len(volumeIDList) == 0 { - return errors.Errorf("no volumeID provided, please provide a volume to detach") + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: "no volumeID provided, please provide a volume to detach", + Target: fmt.Sprintf("{Region: %v}", region), + } } for _, id := range volumeIDList { instanceID, _, err := GetVolumeAttachmentDetails(id, "", region) if err != nil { - return errors.Errorf("fail to get the instanceID for the given volume, err: %v", err) + return stacktrace.Propagate(err, "failed to get the instanceID for the given volume") } volumeState, err := GetEBSStatus(id, instanceID, region) if err != nil || volumeState != "attached" { - return errors.Errorf("fail to get the ebs volume %v in attached state, err: %v", id, err) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: "failed to get the EBS volume in attached state", + Target: fmt.Sprintf("{EBS Volume ID: %v, Region: %v}", id, region), + } } } return nil } -//PostChaosVolumeStatusCheck is the volume state check after chaos completion +// PostChaosVolumeStatusCheck is the volume state check after chaos completion func PostChaosVolumeStatusCheck(experimentsDetails *experimentTypes.ExperimentDetails) error { var ec2InstanceIDList []string @@ -127,26 +150,28 @@ func PostChaosVolumeStatusCheck(experimentsDetails *experimentTypes.ExperimentDe //Get volume attachment details ec2InstanceID, _, err := GetVolumeAttachmentDetails(volumeID, experimentsDetails.VolumeTag, experimentsDetails.Region) if err != nil || ec2InstanceID == "" { - return errors.Errorf("fail to get the attachment info, err: %v", err) + return stacktrace.Propagate(err, "failed to get the attachment info") } ec2InstanceIDList = append(ec2InstanceIDList, ec2InstanceID) //Getting the EBS volume attachment status ebsState, err := GetEBSStatus(volumeID, ec2InstanceIDList[i], experimentsDetails.Region) if err != nil { - return errors.Errorf("failed to get the ebs status, err: %v", err) + return stacktrace.Propagate(err, "failed to get the EBS status") } if ebsState != "attached" { - return errors.Errorf("'%v' volume is not 
in attached state post chaos", volumeID) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: "Volume is not in attached state post chaos", + Target: fmt.Sprintf("{EBS Volume ID: %v, Region: %v}", volumeID, experimentsDetails.Region), + } } - } return nil } -//CheckEBSDetachmentInitialisation will check the start of volume detachment process +// CheckEBSDetachmentInitialisation will check the start of volume detachment process func CheckEBSDetachmentInitialisation(volumeIDs []string, instanceID []string, region string) error { - timeout := 3 delay := 1 return retry. @@ -157,10 +182,14 @@ func CheckEBSDetachmentInitialisation(volumeIDs []string, instanceID []string, r for i, id := range volumeIDs { currentVolumeState, err := GetEBSStatus(id, instanceID[i], region) if err != nil { - return errors.Errorf("failed to get the volume status") + return stacktrace.Propagate(err, "failed to get the volume status") } if currentVolumeState == "attached" { - return errors.Errorf("the volume detachment has not started yet for volume %v", id) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosInject, + Reason: "The volume detachment process hasn't started yet", + Target: fmt.Sprintf("{EBS Volume ID: %v, EC2 Instance ID: %v, Region: %v}", id, instanceID[i], region), + } } } return nil diff --git a/pkg/cloud/aws/ec2/ec2-instance-status.go b/pkg/cloud/aws/ec2/ec2-instance-status.go index b0f9b051f..49b137b61 100644 --- a/pkg/cloud/aws/ec2/ec2-instance-status.go +++ b/pkg/cloud/aws/ec2/ec2-instance-status.go @@ -1,16 +1,18 @@ package aws import ( + "fmt" "strings" "github.com/aws/aws-sdk-go/service/autoscaling" "github.com/aws/aws-sdk-go/service/ec2" + "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/cloud/aws/common" "github.com/litmuschaos/litmus-go/pkg/log" - "github.com/pkg/errors" + "github.com/palantir/stacktrace" ) -//GetEC2InstanceStatus will verify and give the ec2 instance details along with ebs volume idetails. +// GetEC2InstanceStatus will verify and give the ec2 instance details along with ebs volume details. func GetEC2InstanceStatus(instanceID, region string) (string, error) { var err error @@ -23,7 +25,11 @@ func GetEC2InstanceStatus(instanceID, region string) (string, error) { // Call to get detailed information on each instance result, err := ec2Svc.DescribeInstances(nil) if err != nil { - return "", err + return "", cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: fmt.Sprintf("failed to describe the instances: %v", err), + Target: fmt.Sprintf("{EC2 Instance ID: %v, Region: %v}", instanceID, region), + } } for _, reservationDetails := range result.Reservations { @@ -35,42 +41,54 @@ func GetEC2InstanceStatus(instanceID, region string) (string, error) { } } } - return "", errors.Errorf("failed to get the status of ec2 instance with instanceID %v", instanceID) + return "", cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: "failed to get the status of EC2 instance", + Target: fmt.Sprintf("{EC2 Instance ID: %v, Region: %v}", instanceID, region), + } } -//InstanceStatusCheckByID is used to check the instance status of all the instance under chaos. +// InstanceStatusCheckByID is used to check the instance status of all the instances under chaos. 
func InstanceStatusCheckByID(instanceID, region string) error { instanceIDList := strings.Split(instanceID, ",") - if len(instanceIDList) == 0 { - return errors.Errorf("no instance id found to terminate") + if instanceID == "" || len(instanceIDList) == 0 { + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: "no instance id provided to terminate", + Target: fmt.Sprintf("{EC2 Instance ID: %v, Region: %v}", instanceID, region), + } } log.Infof("[Info]: The instances under chaos(IUC) are: %v", instanceIDList) return InstanceStatusCheck(instanceIDList, region) } -//InstanceStatusCheckByTag is used to check the instance status of all the instance under chaos. +// InstanceStatusCheckByTag is used to check the instance status of all the instances under chaos. func InstanceStatusCheckByTag(instanceTag, region string) error { instanceIDList, err := GetInstanceList(instanceTag, region) if err != nil { - return err + return stacktrace.Propagate(err, "failed to get the instance id list") } log.Infof("[Info]: The instances under chaos(IUC) are: %v", instanceIDList) return InstanceStatusCheck(instanceIDList, region) } -//InstanceStatusCheck is used to check the instance status of the instances +// InstanceStatusCheck is used to check the instance status of the instances func InstanceStatusCheck(targetInstanceIDList []string, region string) error { for _, id := range targetInstanceIDList { instanceState, err := GetEC2InstanceStatus(id, region) if err != nil { - return err + return stacktrace.Propagate(err, "failed to get the instance status") } if instanceState != "running" { - return errors.Errorf("failed to get the ec2 instance '%v' in running sate, current state: %v", id, instanceState) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: fmt.Sprintf("EC2 instance is not in running state, current state: %v", instanceState), + Target: fmt.Sprintf("{EC2 Instance ID: %v, Region: %v}", id, region), + } } } return nil @@ -84,19 +102,27 @@ func PreChaosNodeCountCheck(instanceIDList []string, region string) (int, string var err error // fetching all instances in the autoscaling groups if nodeList, err = getAutoScalingInstances(region); err != nil { - return 0, "", err + return 0, "", stacktrace.Propagate(err, "failed to get the autoscaling instances") } // finding the autoscaling group name for the provided instance id if autoScalingGroupName = findAutoScalingGroupName(instanceIDList[0], nodeList); autoScalingGroupName == "" { - return 0, "", errors.Errorf("instances not part of autoscaling group") + return 0, "", cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: "instances not part of an autoscaling group", + Target: fmt.Sprintf("{EC2 Instance IDs: %v, Region: %v}", instanceIDList, region), + } } // finding the active node count for the autoscaling group nodeCount := findActiveNodeCount(autoScalingGroupName, region, nodeList) log.Infof("[Info]: Pre-Chaos Active Node Count: %v", nodeCount) if len(instanceIDList) > nodeCount { - return 0, "", errors.Errorf("active node count less than number of provided instance IDs") + return 0, "", cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: "active node count less than number of provided instance IDs", + Target: fmt.Sprintf("{Auto Scaling Group: %v, Region: %v}", autoScalingGroupName, region), + } } return nodeCount, autoScalingGroupName, nil @@ -110,10 +136,14 @@ func PostChaosNodeCountCheck(activeNodeCount int, autoScalingGroupName, region s // fetching all instances in the 
autoscaling groups if nodeList, err = getAutoScalingInstances(region); err != nil { - return err + return stacktrace.Propagate(err, "failed to get the autoscaling instances") } if autoScalingGroupName == "" { - return errors.Errorf("autoscaling group not provided") + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: "autoscaling group not provided", + Target: fmt.Sprintf("{Region: %v}", region), + } } // finding the active node count for the autoscaling group @@ -122,7 +152,11 @@ func PostChaosNodeCountCheck(activeNodeCount int, autoScalingGroupName, region s // checking if the post-chaos and pre-chaos node count are equal if nodeCount != activeNodeCount { - return errors.Errorf("post-chaos active node count is not equal to the pre-chaos node count") + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: "post-chaos active node count is not equal to the pre-chaos active node count", + Target: fmt.Sprintf("{Auto Scaling Group: %v, Region: %v}", autoScalingGroupName, region), + } } return nil } @@ -136,7 +170,11 @@ func getAutoScalingInstances(region string) ([]*autoscaling.InstanceDetails, err autoScalingInput := autoscaling.DescribeAutoScalingInstancesInput{} nodeList, err := autoScalingSvc.DescribeAutoScalingInstances(&autoScalingInput) if err != nil { - return nil, errors.Errorf("failed to get the autoscaling instances, err: %v", err) + return nil, cerrors.Error{ + ErrorCode: cerrors.ErrorTypeTargetSelection, + Reason: fmt.Sprintf("failed to get the autoscaling instances: %v", err), + Target: fmt.Sprintf("{Region: %v}", region), + } } return nodeList.AutoScalingInstances, nil } @@ -171,7 +209,7 @@ func findActiveNodeCount(autoScalingGroupName, region string, nodeList []*autosc for _, id := range findInstancesInAutoScalingGroup(autoScalingGroupName, nodeList) { instanceState, err := GetEC2InstanceStatus(id, region) if err != nil { - log.Errorf("instance status check failed for %v, err: %v", id, err) + log.Errorf("Instance status check failed for %v, err: %v", id, err) } if instanceState == "running" { nodeCount += 1 diff --git a/pkg/cloud/aws/ec2/ec2-operations.go b/pkg/cloud/aws/ec2/ec2-operations.go index 59f628f5a..596e7db2c 100644 --- a/pkg/cloud/aws/ec2/ec2-operations.go +++ b/pkg/cloud/aws/ec2/ec2-operations.go @@ -1,15 +1,17 @@ package aws import ( + "fmt" "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" + "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/cloud/aws/common" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/utils/retry" - "github.com/pkg/errors" + "github.com/palantir/stacktrace" "github.com/sirupsen/logrus" ) @@ -29,10 +31,14 @@ func EC2Stop(instanceID, region string) error { } result, err := ec2Svc.StopInstances(input) if err != nil { - return common.CheckAWSError(err) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosInject, + Reason: fmt.Sprintf("failed to stop EC2 instance: %v", common.CheckAWSError(err).Error()), + Target: fmt.Sprintf("{EC2 Instance ID: %v, Region: %v}", instanceID, region), + } } - log.InfoWithValues("Stopping ec2 instance:", logrus.Fields{ + log.InfoWithValues("Stopping EC2 instance:", logrus.Fields{ "CurrentState": *result.StoppingInstances[0].CurrentState.Name, "PreviousState": *result.StoppingInstances[0].PreviousState.Name, "InstanceId": *result.StoppingInstances[0].InstanceId, @@ -57,10 +63,14 @@ func EC2Start(instanceID, region string) error { result, err := 
ec2Svc.StartInstances(input) if err != nil { - return common.CheckAWSError(err) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosRevert, + Reason: fmt.Sprintf("failed to start EC2 instance: %v", common.CheckAWSError(err).Error()), + Target: fmt.Sprintf("{EC2 Instance ID: %v, Region: %v}", instanceID, region), + } } - log.InfoWithValues("Starting ec2 instance:", logrus.Fields{ + log.InfoWithValues("Starting EC2 instance:", logrus.Fields{ "CurrentState": *result.StartingInstances[0].CurrentState.Name, "PreviousState": *result.StartingInstances[0].PreviousState.Name, "InstanceId": *result.StartingInstances[0].InstanceId, @@ -69,7 +79,7 @@ func EC2Start(instanceID, region string) error { return nil } -//WaitForEC2Down will wait for the ec2 instance to get in stopped state +// WaitForEC2Down will wait for the ec2 instance to get in stopped state func WaitForEC2Down(timeout, delay int, managedNodegroup, region, instanceID string) error { log.Info("[Status]: Checking EC2 instance status") @@ -80,18 +90,22 @@ func WaitForEC2Down(timeout, delay int, managedNodegroup, region, instanceID str instanceState, err := GetEC2InstanceStatus(instanceID, region) if err != nil { - return errors.Errorf("failed to get the instance status") + return stacktrace.Propagate(err, "failed to get the instance status") } if (managedNodegroup != "enable" && instanceState != "stopped") || (managedNodegroup == "enable" && instanceState != "terminated") { log.Infof("The instance state is %v", instanceState) - return errors.Errorf("instance is not yet in stopped state") + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosInject, + Reason: "instance is not in stopped state", + Target: fmt.Sprintf("{EC2 Instance ID: %v, Region: %v}", instanceID, region), + } } log.Infof("The instance state is %v", instanceState) return nil }) } -//WaitForEC2Up will wait for the ec2 instance to get in running state +// WaitForEC2Up will wait for the ec2 instance to get in running state func WaitForEC2Up(timeout, delay int, managedNodegroup, region, instanceID string) error { log.Info("[Status]: Checking EC2 instance status") @@ -102,11 +116,15 @@ func WaitForEC2Up(timeout, delay int, managedNodegroup, region, instanceID strin instanceState, err := GetEC2InstanceStatus(instanceID, region) if err != nil { - return errors.Errorf("failed to get the instance status") + return stacktrace.Propagate(err, "failed to get the instance status") } if instanceState != "running" { log.Infof("The instance state is %v", instanceState) - return errors.Errorf("instance is not yet in running state") + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosInject, + Reason: "instance is not in running state within timeout", + Target: fmt.Sprintf("{EC2 Instance ID: %v, Region: %v}", instanceID, region), + } } log.Infof("The instance state is %v", instanceState) return nil @@ -114,13 +132,16 @@ func WaitForEC2Up(timeout, delay int, managedNodegroup, region, instanceID strin } -//GetInstanceList will filter out the target instance under chaos using tag filters or the instance list provided. +// GetInstanceList will filter out the target instance under chaos using tag filters or the instance list provided. 
func GetInstanceList(instanceTag, region string) ([]string, error) { var instanceList []string switch instanceTag { case "": - return nil, errors.Errorf("fail to get the instance tag please provide a valid instance tag") + return nil, cerrors.Error{ + ErrorCode: cerrors.ErrorTypeTargetSelection, + Reason: "failed to get the instance tag, invalid instance tag", + Target: fmt.Sprintf("{EC2 Instance Tag: %v, Region: %v}", instanceTag, region)} default: instanceTag := strings.Split(instanceTag, ":") @@ -139,7 +160,10 @@ func GetInstanceList(instanceTag, region string) ([]string, error) { ec2Svc := ec2.New(sess) res, err := ec2Svc.DescribeInstances(params) if err != nil { - return nil, errors.Errorf("fail to list the insances, err: %v", err.Error()) + return nil, cerrors.Error{ + ErrorCode: cerrors.ErrorTypeTargetSelection, + Reason: fmt.Sprintf("failed to list instances: %v", err), + Target: fmt.Sprintf("{EC2 Instance Tag: %v, Region: %v}", instanceTag, region)} } for _, reservationDetails := range res.Reservations { diff --git a/pkg/cloud/aws/ssm/ssm-documentation.go b/pkg/cloud/aws/ssm/ssm-documentation.go index 841949051..2950c6dda 100644 --- a/pkg/cloud/aws/ssm/ssm-documentation.go +++ b/pkg/cloud/aws/ssm/ssm-documentation.go @@ -1,21 +1,26 @@ package ssm import ( - "io/ioutil" + "fmt" + "os" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ssm" + "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/cloud/aws/common" - "github.com/pkg/errors" ) // CreateAndUploadDocument will create and add the ssm document in aws service monitoring docs. func CreateAndUploadDocument(documentName, documentType, documentFormat, documentPath, region string) error { sesh := common.GetAWSSession(region) - openFile, err := ioutil.ReadFile(documentPath) + openFile, err := os.ReadFile(documentPath) if err != nil { - return errors.Errorf("fail to read the file err: %v", err) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosInject, + Reason: fmt.Sprintf("failed to read the file: %v", err), + Target: fmt.Sprintf("{SSM Document Path: %v/%v.%v, Region: %v}", documentPath, documentName, documentFormat, region), + } } documentContent := string(openFile) @@ -30,7 +35,11 @@ func CreateAndUploadDocument(documentName, documentType, documentFormat, documen }) if err != nil { - return errors.Errorf("fail to create docs, err: %v", err) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosInject, + Reason: fmt.Sprintf("failed to upload docs: %v", err), + Target: fmt.Sprintf("{SSM Document Path: %v/%v.%v, Region: %v}", documentPath, documentName, documentFormat, region), + } } return nil } @@ -44,7 +53,11 @@ func SSMDeleteDocument(documentName, region string) error { Name: aws.String(documentName), }) if err != nil { - return common.CheckAWSError(err) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosRevert, + Reason: fmt.Sprintf("failed to delete SSM document: %v", common.CheckAWSError(err).Error()), + Target: fmt.Sprintf("{SSM Document Name: %v, Region: %v}", documentName, region), + } } return nil } diff --git a/pkg/cloud/aws/ssm/ssm-operations.go b/pkg/cloud/aws/ssm/ssm-operations.go index 4c9e2b3e0..5dd0c0385 100644 --- a/pkg/cloud/aws/ssm/ssm-operations.go +++ b/pkg/cloud/aws/ssm/ssm-operations.go @@ -1,6 +1,7 @@ package ssm import ( + "fmt" "strconv" "strings" "time" @@ -8,11 +9,12 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ssm" experimentTypes "github.com/litmuschaos/litmus-go/pkg/aws-ssm/aws-ssm-chaos/types" + 
"github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/cloud/aws/common" ec2 "github.com/litmuschaos/litmus-go/pkg/cloud/aws/ec2" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/utils/retry" - "github.com/pkg/errors" + "github.com/palantir/stacktrace" "github.com/sirupsen/logrus" ) @@ -42,7 +44,11 @@ func SendSSMCommand(experimentsDetails *experimentTypes.ExperimentDetails, ec2In MaxErrors: aws.String("0"), }) if err != nil { - return "", common.CheckAWSError(err) + return "", cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosInject, + Reason: fmt.Sprintf("failed to send SSM command: %v", common.CheckAWSError(err).Error()), + Target: fmt.Sprintf("{EC2 Instance ID: %v, Region: %v}", ec2InstanceID, experimentsDetails.Region), + } } return *res.Command.CommandId, nil @@ -75,8 +81,8 @@ func getParameters(experimentsDetails *experimentTypes.ExperimentDetails) map[st return parameter } -//WaitForCommandStatus will wait until the ssm command comes in target status -func WaitForCommandStatus(status, commandID, EC2InstanceID, region string, timeout, delay int) error { +// WaitForCommandStatus will wait until the ssm command comes in target status +func WaitForCommandStatus(status, commandID, ec2InstanceID, region string, timeout, delay int) error { log.Info("[Status]: Checking SSM command status") return retry. @@ -84,36 +90,43 @@ func WaitForCommandStatus(status, commandID, EC2InstanceID, region string, timeo Wait(time.Duration(delay) * time.Second). Try(func(attempt uint) error { - commandStatus, err := getSSMCommandStatus(commandID, EC2InstanceID, region) + commandStatus, err := getSSMCommandStatus(commandID, ec2InstanceID, region) if err != nil { - return errors.Errorf("failed to get the ssm command status") + return stacktrace.Propagate(err, "failed to get the SSM command status") } if commandStatus != status { log.Infof("The instance state is %v", commandStatus) - return errors.Errorf("ssm command is not yet in %v state", status) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosInject, + Reason: fmt.Sprintf("SSM command is not in %v state within timeout", status), + Target: fmt.Sprintf("{EC2 Instance ID: %v, Region: %v}", ec2InstanceID, region)} } - log.Infof("The ssm command status is %v", commandStatus) + log.Infof("The SSM command status is %v", commandStatus) return nil }) } // getSSMCommandStatus will create and add the ssm document in aws service monitoring docs. 
-func getSSMCommandStatus(commandID, EC2InstanceID, region string) (string, error) { +func getSSMCommandStatus(commandID, ec2InstanceID, region string) (string, error) { sesh := common.GetAWSSession(region) ssmClient := ssm.New(sesh) cmdOutput, err := ssmClient.GetCommandInvocation(&ssm.GetCommandInvocationInput{ CommandId: aws.String(commandID), - InstanceId: aws.String(EC2InstanceID), + InstanceId: aws.String(ec2InstanceID), }) if err != nil { - return "", common.CheckAWSError(err) + return "", cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosInject, + Reason: fmt.Sprintf("failed to get SSM command status: %v", common.CheckAWSError(err).Error()), + Target: fmt.Sprintf("{Command ID: %v, EC2 Instance ID: %v, Region: %v}", commandID, ec2InstanceID, region), + } } return *cmdOutput.Status, nil } -//CheckInstanceInformation will check if the instance has permission to do smm api calls +// CheckInstanceInformation will check if the instance has permission to do ssm api calls func CheckInstanceInformation(experimentsDetails *experimentTypes.ExperimentDetails) error { var instanceIDList []string @@ -122,7 +135,7 @@ func CheckInstanceInformation(experimentsDetails *experimentTypes.ExperimentDeta instanceIDList = strings.Split(experimentsDetails.EC2InstanceID, ",") default: if err := CheckTargetInstanceStatus(experimentsDetails); err != nil { - return err + return stacktrace.Propagate(err, "failed to check target instance(s) status") } instanceIDList = experimentsDetails.TargetInstanceIDList @@ -132,7 +145,11 @@ func CheckInstanceInformation(experimentsDetails *experimentTypes.ExperimentDeta for _, ec2ID := range instanceIDList { res, err := ssmClient.DescribeInstanceInformation(&ssm.DescribeInstanceInformationInput{}) if err != nil { - return common.CheckAWSError(err) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosInject, + Reason: fmt.Sprintf("failed to get instance information: %v", common.CheckAWSError(err).Error()), + Target: fmt.Sprintf("{EC2 Instance ID: %v, Region: %v}", ec2ID, experimentsDetails.Region), + } } isInstanceFound := false if len(res.InstanceInformationList) != 0 { @@ -143,15 +160,19 @@ func CheckInstanceInformation(experimentsDetails *experimentTypes.ExperimentDeta } } if !isInstanceFound { - return errors.Errorf("error: the instance %v might not have suitable permission or iam attached to it. use \"aws ssm describe-instance-information\" to check the available instances", ec2ID) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosInject, + Reason: fmt.Sprintf("the instance %v might not have suitable permission or IAM attached to it. 
Run command `aws ssm describe-instance-information` to check for available instances", ec2ID), + Target: fmt.Sprintf("{EC2 Instance ID: %v, Region: %v}", ec2ID, experimentsDetails.Region), + } } } } - log.Info("[Info]: The target instance have permission to perform ssm api calls") + log.Info("[Info]: The target instances have permission to perform SSM API calls") return nil } -//CancelCommand will cancel the ssm command +// CancelCommand will cancel the ssm command func CancelCommand(commandIDs, region string) error { sesh := common.GetAWSSession(region) ssmClient := ssm.New(sesh) @@ -159,27 +180,35 @@ func CancelCommand(commandIDs, region string) error { CommandId: aws.String(commandIDs), }) if err != nil { - return common.CheckAWSError(err) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosRevert, + Reason: fmt.Sprintf("failed to cancel SSM command: %v", common.CheckAWSError(err).Error()), + Target: fmt.Sprintf("{SSM Command ID: %v, Region: %v}", commandIDs, region), + } } return nil } -//CheckTargetInstanceStatus will select the target instance which are in running state and +// CheckTargetInstanceStatus will select the target instances which are in running state and // filtered from the given instance tag and check its status func CheckTargetInstanceStatus(experimentsDetails *experimentTypes.ExperimentDetails) error { instanceIDList, err := ec2.GetInstanceList(experimentsDetails.EC2InstanceTag, experimentsDetails.Region) if err != nil { - return err + return stacktrace.Propagate(err, "failed to get the instance list") } if len(instanceIDList) == 0 { - return errors.Errorf("no instance found with the given tag %v, in region %v", experimentsDetails.EC2InstanceTag, experimentsDetails.Region) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeTargetSelection, + Reason: "no instance found", + Target: fmt.Sprintf("{EC2 Instance Tag: %v, Region: %v}", experimentsDetails.EC2InstanceTag, experimentsDetails.Region), + } } for _, id := range instanceIDList { instanceState, err := ec2.GetEC2InstanceStatus(id, experimentsDetails.Region) if err != nil { - return errors.Errorf("fail to get the instance status while selecting the target instances, err: %v", err) + return stacktrace.Propagate(err, "failed to get the instance status while selecting the target instances") } if instanceState == "running" { experimentsDetails.TargetInstanceIDList = append(experimentsDetails.TargetInstanceIDList, id) @@ -187,7 +216,11 @@ func CheckTargetInstanceStatus(experimentsDetails *experimentTypes.ExperimentDet } if len(experimentsDetails.TargetInstanceIDList) == 0 { - return errors.Errorf("fail to get any running instance having instance tag: %v", experimentsDetails.EC2InstanceTag) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: "failed to get any running instance with instance tag", + Target: fmt.Sprintf("{EC2 Instance Tag: %v, Region: %v}", experimentsDetails.EC2InstanceTag, experimentsDetails.Region), + } } log.InfoWithValues("[Info]: Targeting the running instances filtered from instance tag", logrus.Fields{ diff --git a/pkg/cloud/azure/common/common.go b/pkg/cloud/azure/common/common.go index bd0929a6e..5670c42c2 100644 --- a/pkg/cloud/azure/common/common.go +++ b/pkg/cloud/azure/common/common.go @@ -2,11 +2,12 @@ package common import ( "encoding/json" - "io/ioutil" + "fmt" + "io" "os" "strings" - "github.com/pkg/errors" + "github.com/litmuschaos/litmus-go/pkg/cerrors" ) // StringInSlice will check and return whether a string is present inside a slice or not @@ -25,23 
+26,35 @@ func GetSubscriptionID() (string, error) { var err error authFile, err := os.Open(os.Getenv("AZURE_AUTH_LOCATION")) if err != nil { - return "", errors.Errorf("fail to open auth file, err: %v", err) + return "", cerrors.Error{ + ErrorCode: cerrors.ErrorTypeGeneric, + Reason: fmt.Sprintf("failed to open auth file: %v", err), + } } - authFileContent, err := ioutil.ReadAll(authFile) + authFileContent, err := io.ReadAll(authFile) if err != nil { - return "", errors.Errorf("fail to read auth file, err: %v", err) + return "", cerrors.Error{ + ErrorCode: cerrors.ErrorTypeGeneric, + Reason: fmt.Sprintf("failed to read auth file: %v", err), + } } details := make(map[string]string) if err := json.Unmarshal(authFileContent, &details); err != nil { - return "", errors.Errorf("fail to unmarshal file, err: %v", err) + return "", cerrors.Error{ + ErrorCode: cerrors.ErrorTypeGeneric, + Reason: fmt.Sprintf("failed to unmarshal file: %v", err), + } } if id, contains := details["subscriptionId"]; contains { return id, nil } - return "", errors.Errorf("The auth file does not have a subscriptionId field") + return "", cerrors.Error{ + ErrorCode: cerrors.ErrorTypeGeneric, + Reason: "The auth file does not have a subscriptionId field", + } } // GetScaleSetNameAndInstanceId extracts the scale set name and VM id from the instance name diff --git a/pkg/cloud/azure/disk/disk-operations.go b/pkg/cloud/azure/disk/disk-operations.go index 6d36e57d8..e81ce4bef 100644 --- a/pkg/cloud/azure/disk/disk-operations.go +++ b/pkg/cloud/azure/disk/disk-operations.go @@ -2,37 +2,47 @@ package azure import ( "context" + "fmt" "time" "github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute" "github.com/Azure/go-autorest/autorest/azure" "github.com/Azure/go-autorest/autorest/azure/auth" "github.com/litmuschaos/litmus-go/pkg/azure/disk-loss/types" + "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/cloud/azure/common" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/utils/retry" - "github.com/pkg/errors" + "github.com/palantir/stacktrace" ) // DetachDisks will detach the list of disk provided for the specific VM instance or scale set vm instance func DetachDisks(subscriptionID, resourceGroup, azureInstanceName, scaleSet string, diskNameList []string) error { + authorizer, err := auth.NewAuthorizerFromFile(azure.PublicCloud.ResourceManagerEndpoint) + if err != nil { + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosInject, + Reason: fmt.Sprintf("authorization set up failed: %v", err), + Target: fmt.Sprintf("{Azure Instance Name: %v, Resource Group: %v}", azureInstanceName, resourceGroup), + } + } + // if the instance is virtual machine scale set (aks node) if scaleSet == "enable" { // Setup and authorize vm client vmssClient := compute.NewVirtualMachineScaleSetVMsClient(subscriptionID) - authorizer, err := auth.NewAuthorizerFromFile(azure.PublicCloud.ResourceManagerEndpoint) - - if err != nil { - return errors.Errorf("fail to setup authorization, err: %v", err) - } vmssClient.Authorizer = authorizer // Fetch the vm instance scaleSetName, vmId := common.GetScaleSetNameAndInstanceId(azureInstanceName) vm, err := vmssClient.Get(context.TODO(), resourceGroup, scaleSetName, vmId, compute.InstanceViewTypes("instanceView")) if err != nil { - return errors.Errorf("fail get instance, err: %v", err) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosInject, + Reason: fmt.Sprintf("failed to get instance: %v", err), + Target: 
fmt.Sprintf("{Azure Instance Name: %v, Resource Group: %v}", azureInstanceName, resourceGroup), + } } // Create list of Disks that are not to be detached var keepAttachedList []compute.DataDisk @@ -55,23 +65,26 @@ func DetachDisks(subscriptionID, resourceGroup, azureInstanceName, scaleSet stri // Update the VM with the keepAttachedList to detach the specified disks _, err = vmssClient.Update(context.TODO(), resourceGroup, scaleSetName, vmId, vm) if err != nil { - return errors.Errorf("cannot detach disk, err: %v", err) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosInject, + Reason: fmt.Sprintf("cannot detach disk: %v", err), + Target: fmt.Sprintf("{Azure Instance Name: %v, Resource Group: %v}", azureInstanceName, resourceGroup), + } } } else { // Setup and authorize vm client vmClient := compute.NewVirtualMachinesClient(subscriptionID) - authorizer, err := auth.NewAuthorizerFromFile(azure.PublicCloud.ResourceManagerEndpoint) - - if err != nil { - return errors.Errorf("fail to setup authorization, err: %v", err) - } vmClient.Authorizer = authorizer // Fetch the vm instance vm, err := vmClient.Get(context.TODO(), resourceGroup, azureInstanceName, compute.InstanceViewTypes("instanceView")) if err != nil { - return errors.Errorf("fail get instance, err: %v", err) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosInject, + Reason: fmt.Sprintf("failed to get instance: %v", err), + Target: fmt.Sprintf("{Azure Instance Name: %v, Resource Group: %v}", azureInstanceName, resourceGroup), + } } // Create list of Disks that are not to be detached var keepAttachedList []compute.DataDisk @@ -92,7 +105,11 @@ func DetachDisks(subscriptionID, resourceGroup, azureInstanceName, scaleSet stri // Update the VM with the keepAttachedList to detach the specified disks _, err = vmClient.CreateOrUpdate(context.TODO(), resourceGroup, azureInstanceName, vm) if err != nil { - return errors.Errorf("cannot detach disk, err: %v", err) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosInject, + Reason: fmt.Sprintf("cannot detach disk(s): %v", err), + Target: fmt.Sprintf("{Azure Instance Name: %v, Resource Group: %v}", azureInstanceName, resourceGroup), + } } } return nil @@ -101,22 +118,30 @@ func DetachDisks(subscriptionID, resourceGroup, azureInstanceName, scaleSet stri // AttachDisk will attach the list of disk provided for the specific VM instance func AttachDisk(subscriptionID, resourceGroup, azureInstanceName, scaleSet string, diskList *[]compute.DataDisk) error { + authorizer, err := auth.NewAuthorizerFromFile(azure.PublicCloud.ResourceManagerEndpoint) + if err != nil { + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosRevert, + Reason: fmt.Sprintf("authorization set up failed: %v", err), + Target: fmt.Sprintf("{Azure Instance Name: %v, Resource Group: %v}", azureInstanceName, resourceGroup), + } + } + // if the instance is virtual machine scale set (aks node) if scaleSet == "enable" { // Setup and authorize vm client vmClient := compute.NewVirtualMachineScaleSetVMsClient(subscriptionID) - authorizer, err := auth.NewAuthorizerFromFile(azure.PublicCloud.ResourceManagerEndpoint) - - if err != nil { - return errors.Errorf("fail to setup authorization, err: %v", err) - } vmClient.Authorizer = authorizer // Fetch the vm instance scaleSetName, vmId := common.GetScaleSetNameAndInstanceId(azureInstanceName) vm, err := vmClient.Get(context.TODO(), resourceGroup, scaleSetName, vmId, compute.InstanceViewTypes("instanceView")) if err != nil { - return errors.Errorf("fail get instance, 
err: %v", err) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosRevert, + Reason: fmt.Sprintf("failed to get instance: %v", err), + Target: fmt.Sprintf("{Azure Instance Name: %v, Resource Group: %v}", azureInstanceName, resourceGroup), + } } vm.VirtualMachineScaleSetVMProperties.StorageProfile.DataDisks = diskList @@ -126,22 +151,23 @@ func AttachDisk(subscriptionID, resourceGroup, azureInstanceName, scaleSet strin // Update the VM properties _, err = vmClient.Update(context.TODO(), resourceGroup, scaleSetName, vmId, vm) if err != nil { - return errors.Errorf("cannot attach disk, err: %v", err) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosRevert, + Reason: fmt.Sprintf("cannot attach disk: %v", err), + Target: fmt.Sprintf("{Azure Instance Name: %v, Resource Group: %v}", azureInstanceName, resourceGroup)} } } else { // Setup and authorize vm client vmClient := compute.NewVirtualMachinesClient(subscriptionID) - authorizer, err := auth.NewAuthorizerFromFile(azure.PublicCloud.ResourceManagerEndpoint) - - if err != nil { - return errors.Errorf("fail to setup authorization, err: %v", err) - } vmClient.Authorizer = authorizer // Fetch the vm instance vm, err := vmClient.Get(context.TODO(), resourceGroup, azureInstanceName, compute.InstanceViewTypes("instanceView")) if err != nil { - return errors.Errorf("fail get instance, err: %v", err) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosRevert, + Reason: fmt.Sprintf("failed to get instance: %v", err), + Target: fmt.Sprintf("{Azure Instance Name: %v, Resource Group: %v}", azureInstanceName, resourceGroup)} } // Attach the disk to VM properties @@ -150,7 +176,10 @@ func AttachDisk(subscriptionID, resourceGroup, azureInstanceName, scaleSet strin // Update the VM properties _, err = vmClient.CreateOrUpdate(context.TODO(), resourceGroup, azureInstanceName, vm) if err != nil { - return errors.Errorf("cannot attach disk, err: %v", err) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosRevert, + Reason: fmt.Sprintf("cannot attach disk(s): %v", err), + Target: fmt.Sprintf("{Azure Instance Name: %v, Resource Group: %v}", azureInstanceName, resourceGroup)} } } return nil @@ -164,11 +193,15 @@ func WaitForDiskToAttach(experimentsDetails *types.ExperimentDetails, diskName s Try(func(attempt uint) error { diskState, err := GetDiskStatus(experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, diskName) if err != nil { - return errors.Errorf("failed to get the disk status, err: %v", err) + return stacktrace.Propagate(err, "failed to get the disk status") } if diskState != "Attached" { - log.Infof("[Status]: Disk %v is not yet attached, state: %v", diskName, diskState) - return errors.Errorf("Disk %v is not yet attached, state: %v", diskName, diskState) + log.Infof("[Status]: Disk %v is not yet attached, current state: %v", diskName, diskState) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosRevert, + Reason: fmt.Sprintf("Disk is not attached within timeout, disk state: %s", diskState), + Target: fmt.Sprintf("{Azure Disk Name: %v, Resource Group: %v}", diskName, experimentsDetails.ResourceGroup), + } } log.Infof("[Status]: Disk %v is Attached", diskName) return nil @@ -183,11 +216,15 @@ func WaitForDiskToDetach(experimentsDetails *types.ExperimentDetails, diskName s Try(func(attempt uint) error { diskState, err := GetDiskStatus(experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, diskName) if err != nil { - return errors.Errorf("failed to get the disk status, err: %v", err) + return 
stacktrace.Propagate(err, "failed to get the disk status") } if diskState != "Unattached" { log.Infof("[Status]: Disk %v is not yet detached, state: %v", diskName, diskState) - return errors.Errorf("Disk %v is not yet detached, state: %v", diskName, diskState) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosInject, + Reason: fmt.Sprintf("Disk is not detached within timeout, disk state: %s", diskState), + Target: fmt.Sprintf("{Azure Disk Name: %v, Resource Group: %v}", diskName, experimentsDetails.ResourceGroup), + } } log.Infof("[Status]: Disk %v is Detached", diskName) return nil diff --git a/pkg/cloud/azure/disk/disk-status.go b/pkg/cloud/azure/disk/disk-status.go index cea44ce2c..b28ade32a 100644 --- a/pkg/cloud/azure/disk/disk-status.go +++ b/pkg/cloud/azure/disk/disk-status.go @@ -2,60 +2,69 @@ package azure import ( "context" + "fmt" "regexp" "strings" "github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute" "github.com/Azure/go-autorest/autorest/azure" "github.com/Azure/go-autorest/autorest/azure/auth" + "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/cloud/azure/common" - - "github.com/pkg/errors" ) // GetInstanceDiskList will fetch the disks attached to an instance func GetInstanceDiskList(subscriptionID, resourceGroup, scaleSet, azureInstanceName string) (*[]compute.DataDisk, error) { + authorizer, err := auth.NewAuthorizerFromFile(azure.PublicCloud.ResourceManagerEndpoint) + if err != nil { + return nil, cerrors.Error{ + ErrorCode: cerrors.ErrorTypeGeneric, + Reason: fmt.Sprintf("authorization set up failed: %v", err), + Target: fmt.Sprintf("{Azure Instance Name: %v, Resource Group: %v}", azureInstanceName, resourceGroup), + } + } + // if the instance is of virtual machine scale set (aks node) if scaleSet == "enable" { vmClient := compute.NewVirtualMachineScaleSetVMsClient(subscriptionID) - authorizer, err := auth.NewAuthorizerFromFile(azure.PublicCloud.ResourceManagerEndpoint) - if err != nil { - return nil, errors.Errorf("fail to setup authorization, err: %v", err) - } vmClient.Authorizer = authorizer // Fetch the vm instance scaleSetName, vmId := common.GetScaleSetNameAndInstanceId(azureInstanceName) vm, err := vmClient.Get(context.TODO(), resourceGroup, scaleSetName, vmId, compute.InstanceViewTypes("instanceView")) if err != nil { - return nil, errors.Errorf("fail get instance, err: %v", err) + return nil, cerrors.Error{ + ErrorCode: cerrors.ErrorTypeTargetSelection, + Reason: fmt.Sprintf("failed to get instance: %v", err), + Target: fmt.Sprintf("{Azure Instance Name: %v, Resource Group: %v}", azureInstanceName, resourceGroup), + } } // Get the disks attached to the instance list := vm.VirtualMachineScaleSetVMProperties.StorageProfile.DataDisks return list, nil - } + } else { + // Setup and authorize vm client + vmClient := compute.NewVirtualMachinesClient(subscriptionID) - // Setup and authorize vm client - vmClient := compute.NewVirtualMachinesClient(subscriptionID) - authorizer, err := auth.NewAuthorizerFromFile(azure.PublicCloud.ResourceManagerEndpoint) + vmClient.Authorizer = authorizer - if err != nil { - return nil, errors.Errorf("fail to setup authorization, err: %v", err) - } - vmClient.Authorizer = authorizer + // Fetch the vm instance + vm, err := vmClient.Get(context.TODO(), resourceGroup, azureInstanceName, compute.InstanceViewTypes("instanceView")) + if err != nil { + return nil, cerrors.Error{ + ErrorCode: cerrors.ErrorTypeTargetSelection, + Reason: fmt.Sprintf("failed to get instance: %v", err), 
+ Target: fmt.Sprintf("{Azure Instance Name: %v, Resource Group: %v}", azureInstanceName, resourceGroup), + } + } - // Fetch the vm instance - vm, err := vmClient.Get(context.TODO(), resourceGroup, azureInstanceName, compute.InstanceViewTypes("instanceView")) - if err != nil { - return nil, errors.Errorf("fail get instance, err: %v", err) + // Get the disks attached to the instance + list := vm.VirtualMachineProperties.StorageProfile.DataDisks + return list, nil } - - // Get the disks attached to the instance - list := vm.VirtualMachineProperties.StorageProfile.DataDisks - return list, nil } // GetDiskStatus will get the status of disk (attached/unattached) @@ -64,16 +73,23 @@ func GetDiskStatus(subscriptionID, resourceGroup, diskName string) (compute.Disk // Setup and authorize disk client diskClient := compute.NewDisksClient(subscriptionID) authorizer, err := auth.NewAuthorizerFromFile(azure.PublicCloud.ResourceManagerEndpoint) - if err != nil { - return "", errors.Errorf("fail to setup authorization, err: %v", err) + return "", cerrors.Error{ + ErrorCode: cerrors.ErrorTypeGeneric, + Reason: fmt.Sprintf("authorization set up failed: %v", err), + Target: fmt.Sprintf("{Azure Disk Name: %v, Resource Group: %v}", diskName, resourceGroup), + } } diskClient.Authorizer = authorizer // Get the disk status disk, err := diskClient.Get(context.TODO(), resourceGroup, diskName) if err != nil { - return "", errors.Errorf("failed to get disk, err:%v", err) + return "", cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: fmt.Sprintf("failed to get disk: %v", err), + Target: fmt.Sprintf("{Azure Disk Name: %v, Resource Group: %v}", diskName, resourceGroup), + } } return disk.DiskProperties.DiskState, nil } @@ -86,20 +102,37 @@ func CheckVirtualDiskWithInstance(subscriptionID, virtualDiskNames, resourceGrou authorizer, err := auth.NewAuthorizerFromFile(azure.PublicCloud.ResourceManagerEndpoint) if err != nil { - return errors.Errorf("fail to setup authorization, err: %v", err) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeGeneric, + Reason: fmt.Sprintf("authorization set up failed: %v", err), + Target: fmt.Sprintf("{Resource Group: %v}", resourceGroup), + } } diskClient.Authorizer = authorizer // Creating an array of the name of the attached disks diskNameList := strings.Split(virtualDiskNames, ",") + if virtualDiskNames == "" || len(diskNameList) == 0 { + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: "no disk name provided", + Target: fmt.Sprintf("{Resource Group: %v}", resourceGroup), + } + } for _, diskName := range diskNameList { disk, err := diskClient.Get(context.Background(), resourceGroup, diskName) if err != nil { - return errors.Errorf("failed to get disk: %v, err: %v", diskName, err) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: fmt.Sprintf("failed to get disk: %v", err), + Target: fmt.Sprintf("{Azure Disk Name: %v, Resource Group: %v}", diskName, resourceGroup)} } if disk.ManagedBy == nil { - return errors.Errorf("disk %v not attached to any instance", diskName) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: "disk is not attached to any instance", + Target: fmt.Sprintf("{Azure Disk Name: %v, Resource Group: %v}", diskName, resourceGroup)} } } return nil @@ -111,15 +144,18 @@ func GetInstanceNameForDisks(diskNameList []string, subscriptionID, resourceGrou // Setup and authorize disk client diskClient := compute.NewDisksClient(subscriptionID) authorizer, err := 
auth.NewAuthorizerFromFile(azure.PublicCloud.ResourceManagerEndpoint) - - // Creating a map to store the instance name with attached disk(s) name - instanceNameWithDiskMap := make(map[string][]string) - if err != nil { - return instanceNameWithDiskMap, errors.Errorf("fail to setup authorization, err: %v", err) + return nil, cerrors.Error{ + ErrorCode: cerrors.ErrorTypeTargetSelection, + Reason: fmt.Sprintf("authorization set up failed: %v", err), + Target: fmt.Sprintf("{Azure Disk Names: %v, Resource Group: %v}", diskNameList, resourceGroup), + } } diskClient.Authorizer = authorizer + // Creating a map to store the instance name with attached disk(s) name + instanceNameWithDiskMap := make(map[string][]string) + // Using regex pattern match to extract instance name from disk.ManagedBy // /subscriptionID//resourceGroup//providers/Microsoft.Compute/virtualMachines/instanceName instanceNameRegex := regexp.MustCompile(`virtualMachines/`) @@ -127,7 +163,11 @@ func GetInstanceNameForDisks(diskNameList []string, subscriptionID, resourceGrou for _, diskName := range diskNameList { disk, err := diskClient.Get(context.TODO(), resourceGroup, diskName) if err != nil { - return instanceNameWithDiskMap, nil + return nil, cerrors.Error{ + ErrorCode: cerrors.ErrorTypeTargetSelection, + Reason: fmt.Sprintf("failed to get disk: %v", err), + Target: fmt.Sprintf("{Azure Disk Name: %v, Resource Group: %v}", diskName, resourceGroup), + } } res := instanceNameRegex.FindStringIndex(*disk.ManagedBy) i := res[1] diff --git a/pkg/cloud/azure/instance/instance-operations.go b/pkg/cloud/azure/instance/instance-operations.go index 0859b4d0e..3468c6444 100644 --- a/pkg/cloud/azure/instance/instance-operations.go +++ b/pkg/cloud/azure/instance/instance-operations.go @@ -2,16 +2,18 @@ package azure import ( "context" + "fmt" "time" "github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute" "github.com/Azure/go-autorest/autorest/azure" "github.com/Azure/go-autorest/autorest/azure/auth" + "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/cloud/azure/common" + "github.com/palantir/stacktrace" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/utils/retry" - "github.com/pkg/errors" ) // AzureInstanceStop stops the target instance @@ -20,7 +22,11 @@ func AzureInstanceStop(timeout, delay int, subscriptionID, resourceGroup, azureI authorizer, err := auth.NewAuthorizerFromFile(azure.PublicCloud.ResourceManagerEndpoint) if err != nil { - return errors.Errorf("fail to setup authorization, err: %v", err) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosInject, + Reason: fmt.Sprintf("authorization set up failed: %v", err), + Target: fmt.Sprintf("{Azure Instance Name: %v, Resource Group: %v}", azureInstanceName, resourceGroup), + } } vmClient.Authorizer = authorizer @@ -28,7 +34,11 @@ func AzureInstanceStop(timeout, delay int, subscriptionID, resourceGroup, azureI log.Info("[Info]: Stopping the instance") _, err = vmClient.PowerOff(context.TODO(), resourceGroup, azureInstanceName, &vmClient.SkipResourceProviderRegistration) if err != nil { - return errors.Errorf("fail to stop the %v instance, err: %v", azureInstanceName, err) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosInject, + Reason: fmt.Sprintf("failed to stop the instance: %v", err), + Target: fmt.Sprintf("{Azure Instance Name: %v, Resource Group: %v}", azureInstanceName, resourceGroup), + } } return nil @@ -41,7 +51,11 @@ func AzureInstanceStart(timeout, delay int, 
subscriptionID, resourceGroup, azure authorizer, err := auth.NewAuthorizerFromFile(azure.PublicCloud.ResourceManagerEndpoint) if err != nil { - return errors.Errorf("fail to setup authorization, err: %v", err) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosRevert, + Reason: fmt.Sprintf("authorization set up failed: %v", err), + Target: fmt.Sprintf("{Azure Instance Name: %v, Resource Group: %v}", azureInstanceName, resourceGroup), + } } vmClient.Authorizer = authorizer @@ -49,7 +63,11 @@ func AzureInstanceStart(timeout, delay int, subscriptionID, resourceGroup, azure log.Info("[Info]: Starting back the instance to running state") _, err = vmClient.Start(context.TODO(), resourceGroup, azureInstanceName) if err != nil { - return errors.Errorf("fail to start the %v instance, err: %v", azureInstanceName, err) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosRevert, + Reason: fmt.Sprintf("failed to start the instance: %v", err), + Target: fmt.Sprintf("{Azure Instance Name: %v, Resource Group: %v}", azureInstanceName, resourceGroup), + } } return nil @@ -61,7 +79,11 @@ func AzureScaleSetInstanceStop(timeout, delay int, subscriptionID, resourceGroup authorizer, err := auth.NewAuthorizerFromFile(azure.PublicCloud.ResourceManagerEndpoint) if err != nil { - return errors.Errorf("fail to setup authorization, err: %v", err) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosInject, + Reason: fmt.Sprintf("authorization set up failed: %v", err), + Target: fmt.Sprintf("{Azure Instance Name: %v, Resource Group: %v}", azureInstanceName, resourceGroup), + } } vmssClient.Authorizer = authorizer @@ -71,7 +93,11 @@ func AzureScaleSetInstanceStop(timeout, delay int, subscriptionID, resourceGroup log.Info("[Info]: Stopping the instance") _, err = vmssClient.PowerOff(context.TODO(), resourceGroup, virtualMachineScaleSetName, virtualMachineId, &vmssClient.SkipResourceProviderRegistration) if err != nil { - return errors.Errorf("fail to stop the %v_%v instance, err: %v", virtualMachineScaleSetName, virtualMachineId, err) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosInject, + Reason: fmt.Sprintf("failed to stop the instance: %v", err), + Target: fmt.Sprintf("{Azure Instance Name: %v, Resource Group: %v}", azureInstanceName, resourceGroup), + } } return nil @@ -83,7 +109,10 @@ func AzureScaleSetInstanceStart(timeout, delay int, subscriptionID, resourceGrou authorizer, err := auth.NewAuthorizerFromFile(azure.PublicCloud.ResourceManagerEndpoint) if err != nil { - return errors.Errorf("fail to setup authorization, err: %v", err) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosRevert, + Reason: fmt.Sprintf("authorization set up failed: %v", err), + Target: fmt.Sprintf("{Azure Instance Name: %v, Resource Group: %v}", azureInstanceName, resourceGroup)} } vmssClient.Authorizer = authorizer @@ -93,13 +122,17 @@ func AzureScaleSetInstanceStart(timeout, delay int, subscriptionID, resourceGrou log.Info("[Info]: Starting back the instance to running state") _, err = vmssClient.Start(context.TODO(), resourceGroup, virtualMachineScaleSetName, virtualMachineId) if err != nil { - return errors.Errorf("fail to start the %v_%v instance, err: %v", virtualMachineScaleSetName, virtualMachineId, err) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosRevert, + Reason: fmt.Sprintf("failed to start the instance: %v", err), + Target: fmt.Sprintf("{Azure Instance Name: %v, Resource Group: %v}", azureInstanceName, resourceGroup), + } } return nil } -//WaitForAzureComputeDown will 
wait for the azure compute instance to get in stopped state +// WaitForAzureComputeDown will wait for the azure compute instance to get in stopped state func WaitForAzureComputeDown(timeout, delay int, scaleSet, subscriptionID, resourceGroup, azureInstanceName string) error { var instanceState string @@ -117,16 +150,20 @@ func WaitForAzureComputeDown(timeout, delay int, scaleSet, subscriptionID, resou instanceState, err = GetAzureInstanceStatus(subscriptionID, resourceGroup, azureInstanceName) } if err != nil { - return errors.Errorf("failed to get the instance status") + return stacktrace.Propagate(err, "failed to get the instance status") } if instanceState != "VM stopped" { - return errors.Errorf("instance is not yet in stopped state") + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosInject, + Reason: "instance is not in stopped state within timeout", + Target: fmt.Sprintf("{Azure Instance Name: %v, Resource Group: %v}", azureInstanceName, resourceGroup), + } } return nil }) } -//WaitForAzureComputeUp will wait for the azure compute instance to get in running state +// WaitForAzureComputeUp will wait for the azure compute instance to get in running state func WaitForAzureComputeUp(timeout, delay int, scaleSet, subscriptionID, resourceGroup, azureInstanceName string) error { var instanceState string @@ -146,10 +183,14 @@ func WaitForAzureComputeUp(timeout, delay int, scaleSet, subscriptionID, resourc instanceState, err = GetAzureInstanceStatus(subscriptionID, resourceGroup, azureInstanceName) } if err != nil { - return errors.Errorf("failed to get instance status") + return stacktrace.Propagate(err, "failed to get instance status") } if instanceState != "VM running" { - return errors.Errorf("instance is not yet in running state") + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosRevert, + Reason: "instance is not in running state within timeout", + Target: fmt.Sprintf("{Azure Instance Name: %v, Resource Group: %v}", azureInstanceName, resourceGroup), + } } return nil }) diff --git a/pkg/cloud/azure/instance/instance-status.go b/pkg/cloud/azure/instance/instance-status.go index b3039ddb1..3594b130d 100644 --- a/pkg/cloud/azure/instance/instance-status.go +++ b/pkg/cloud/azure/instance/instance-status.go @@ -2,78 +2,108 @@ package azure import ( "context" + "fmt" "strings" "github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute" "github.com/Azure/go-autorest/autorest/azure" "github.com/Azure/go-autorest/autorest/azure/auth" + "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/cloud/azure/common" + "github.com/palantir/stacktrace" "github.com/litmuschaos/litmus-go/pkg/log" - "github.com/pkg/errors" ) -//GetAzureInstanceStatus will verify the azure instance state details +// GetAzureInstanceStatus will verify the azure instance state details func GetAzureInstanceStatus(subscriptionID, resourceGroup, azureInstanceName string) (string, error) { vmClient := compute.NewVirtualMachinesClient(subscriptionID) authorizer, err := auth.NewAuthorizerFromFile(azure.PublicCloud.ResourceManagerEndpoint) if err != nil { - return "", errors.Errorf("fail to setup authorization, err: %v", err) + return "", cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: fmt.Sprintf("authorization set up failed: %v", err), + Target: fmt.Sprintf("{Azure Instance Name: %v, Resource Group: %v}", azureInstanceName, resourceGroup), + } } vmClient.Authorizer = authorizer instanceDetails, err := vmClient.InstanceView(context.TODO(), resourceGroup,
azureInstanceName) if err != nil { - return "", errors.Errorf("fail to get the instance to check status, err: %v", err) + return "", cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: fmt.Sprintf("failed to get the instance: %v", err), + Target: fmt.Sprintf("{Azure Instance Name: %v, Resource Group: %v}", azureInstanceName, resourceGroup), + } } // The *instanceDetails.Statuses list contains the instance status details as shown below // Item 1: Provisioning succeeded // Item 2: VM running if len(*instanceDetails.Statuses) < 2 { - return "", errors.Errorf("fail to get the instatus vm status") + return "", cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: "failed to get the instance status", + Target: fmt.Sprintf("{Azure Instance Name: %v, Resource Group: %v}", azureInstanceName, resourceGroup), + } } // To print VM status - log.Infof("[Status]: The instance %v state is: '%s'", azureInstanceName, *(*instanceDetails.Statuses)[1].DisplayStatus) + log.Infof("[Status]: The instance %v state is: '%v'", azureInstanceName, *(*instanceDetails.Statuses)[1].DisplayStatus) return *(*instanceDetails.Statuses)[1].DisplayStatus, nil } -//GetAzureScaleSetInstanceStatus will verify the azure instance state details in the scale set +// GetAzureScaleSetInstanceStatus will verify the azure instance state details in the scale set func GetAzureScaleSetInstanceStatus(subscriptionID, resourceGroup, virtualMachineScaleSetName, virtualMachineId string) (string, error) { vmssClient := compute.NewVirtualMachineScaleSetVMsClient(subscriptionID) authorizer, err := auth.NewAuthorizerFromFile(azure.PublicCloud.ResourceManagerEndpoint) if err != nil { - return "", errors.Errorf("fail to setup authorization, err: %v", err) + return "", cerrors.Error{ + ErrorCode: cerrors.ErrorTypeGeneric, + Reason: fmt.Sprintf("authorization set up failed: %v", err), + Target: fmt.Sprintf("{Azure Instance Name: %v_%v, Resource Group: %v}", virtualMachineScaleSetName, virtualMachineId, resourceGroup), + } } vmssClient.Authorizer = authorizer instanceDetails, err := vmssClient.GetInstanceView(context.TODO(), resourceGroup, virtualMachineScaleSetName, virtualMachineId) if err != nil { - return "", errors.Errorf("fail to get the instance to check status, err: %v", err) + return "", cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: fmt.Sprintf("failed to get the instance: %v", err), + Target: fmt.Sprintf("{Azure Instance Name: %v_%v, Resource Group: %v}", virtualMachineScaleSetName, virtualMachineId, resourceGroup), + } } // The *instanceDetails.Statuses list contains the instance status details as shown below // Item 1: Provisioning succeeded // Item 2: VM running if len(*instanceDetails.Statuses) < 2 { - return "", errors.Errorf("fail to get the instatus vm status") + return "", cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: "failed to get the instance status", + Target: fmt.Sprintf("{Azure Instance Name: %v_%v}", virtualMachineScaleSetName, virtualMachineId), + } } // To print VM status - log.Infof("[Status]: The instance %v_%v state is: '%s'", virtualMachineScaleSetName, virtualMachineId, *(*instanceDetails.Statuses)[1].DisplayStatus) + log.Infof("[Status]: The instance %v_%v state is: '%v'", virtualMachineScaleSetName, virtualMachineId, *(*instanceDetails.Statuses)[1].DisplayStatus) return *(*instanceDetails.Statuses)[1].DisplayStatus, nil } // InstanceStatusCheckByName is used to check the instance status of all the instance under chaos func 
InstanceStatusCheckByName(azureInstanceNames, scaleSet, subscriptionID, resourceGroup string) error { instanceNameList := strings.Split(azureInstanceNames, ",") - if len(instanceNameList) == 0 { - return errors.Errorf("no instance found to check the status") + if azureInstanceNames == "" || len(instanceNameList) == 0 { + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: "no instance provided", + Target: fmt.Sprintf("{Azure Instance Names: %v, Resource Group: %v}", azureInstanceNames, resourceGroup), + } } log.Infof("[Info]: The instance under chaos(IUC) are: %v", instanceNameList) switch scaleSet { @@ -87,36 +117,48 @@ func InstanceStatusCheckByName(azureInstanceNames, scaleSet, subscriptionID, res // InstanceStatusCheck is used to check the instance status of given list of instances func InstanceStatusCheck(targetInstanceNameList []string, subscriptionID, resourceGroup string) error { - for _, vmName := range targetInstanceNameList { - instanceState, err := GetAzureInstanceStatus(subscriptionID, resourceGroup, vmName) + for _, azureInstanceName := range targetInstanceNameList { + instanceState, err := GetAzureInstanceStatus(subscriptionID, resourceGroup, azureInstanceName) if err != nil { - return err + return stacktrace.Propagate(err, "failed to get instance status") } if instanceState != "VM running" { - return errors.Errorf("failed to get the azure instance '%v' in running state, current state: %v", vmName, instanceState) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: fmt.Sprintf("azure instance is not in running state, current state: %v", instanceState), + Target: fmt.Sprintf("{Azure Instance Name: %v, Resource Group: %v}", azureInstanceName, resourceGroup), + } } } return nil } -//GetAzureInstanceProvisionStatus will check for the azure instance provision state details +// GetAzureInstanceProvisionStatus will check for the azure instance provision state details func GetAzureInstanceProvisionStatus(subscriptionID, resourceGroup, azureInstanceName, scaleSet string) (string, error) { if scaleSet == "enable" { vmssClient := compute.NewVirtualMachineScaleSetVMsClient(subscriptionID) authorizer, err := auth.NewAuthorizerFromFile(azure.PublicCloud.ResourceManagerEndpoint) if err != nil { - return "", errors.Errorf("fail to setup authorization, err: %v", err) + return "", cerrors.Error{ + ErrorCode: cerrors.ErrorTypeGeneric, + Reason: fmt.Sprintf("authorization set up failed: %v", err), + Target: fmt.Sprintf("{Azure Instance Name: %v, Resource Group: %v}", azureInstanceName, resourceGroup), + } } vmssClient.Authorizer = authorizer scaleSetName, vmId := common.GetScaleSetNameAndInstanceId(azureInstanceName) vm, err := vmssClient.Get(context.TODO(), resourceGroup, scaleSetName, vmId, "instanceView") if err != nil { - return "", errors.Errorf("fail to get the instance to check status, err: %v", err) + return "", cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: fmt.Sprintf("failed to get the instance: %v", err), + Target: fmt.Sprintf("{Azure Instance Name: %v, Resource Group: %v}", azureInstanceName, resourceGroup), + } } instanceDetails := vm.VirtualMachineScaleSetVMProperties.InstanceView // To print VM provision status - log.Infof("[Status]: The instance %v provision state is: '%s'", azureInstanceName, *(*instanceDetails.Statuses)[0].DisplayStatus) + log.Infof("[Status]: The instance %v provision state is: '%v'", azureInstanceName, *(*instanceDetails.Statuses)[0].DisplayStatus) return
*(*instanceDetails.Statuses)[0].DisplayStatus, nil } @@ -124,16 +166,24 @@ func GetAzureInstanceProvisionStatus(subscriptionID, resourceGroup, azureInstanc authorizer, err := auth.NewAuthorizerFromFile(azure.PublicCloud.ResourceManagerEndpoint) if err != nil { - return "", errors.Errorf("fail to setup authorization, err: %v", err) + return "", cerrors.Error{ + ErrorCode: cerrors.ErrorTypeGeneric, + Reason: fmt.Sprintf("authorization set up failed: %v", err), + Target: fmt.Sprintf("{Azure Instance Name: %v, Resource Group: %v}", azureInstanceName, resourceGroup), + } } vmClient.Authorizer = authorizer instanceDetails, err := vmClient.InstanceView(context.TODO(), resourceGroup, azureInstanceName) if err != nil { - return "", errors.Errorf("fail to get the instance to check status, err: %v", err) + return "", cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: fmt.Sprintf("failed to get the instance: %v", err), + Target: fmt.Sprintf("{Azure Instance Name: %v, Resource Group: %v}", azureInstanceName, resourceGroup), + } } // To print VM provision status - log.Infof("[Status]: The instance %v provision state is: '%s'", azureInstanceName, *(*instanceDetails.Statuses)[0].DisplayStatus) + log.Infof("[Status]: The instance %v provision state is: '%v'", azureInstanceName, *(*instanceDetails.Statuses)[0].DisplayStatus) return *(*instanceDetails.Statuses)[0].DisplayStatus, nil } @@ -148,7 +198,10 @@ func ScaleSetInstanceStatusCheck(targetInstanceNameList []string, subscriptionID return err } if instanceState != "VM running" { - return errors.Errorf("failed to get the azure instance '%v_%v' in running state, current state: %v", scaleSet, vm, instanceState) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: fmt.Sprintf("azure instance is not in running state, current state: %v", instanceState), + Target: fmt.Sprintf("{Azure Instance Name: %v_%v, Resource Group: %v}", scaleSet, vm, resourceGroup)} } } return nil diff --git a/pkg/cloud/gcp/disk-operations.go b/pkg/cloud/gcp/disk-operations.go index 9e89d1f9c..1850f7f6f 100644 --- a/pkg/cloud/gcp/disk-operations.go +++ b/pkg/cloud/gcp/disk-operations.go @@ -1,11 +1,12 @@ package gcp import ( + "fmt" "strings" + "github.com/litmuschaos/litmus-go/pkg/cerrors" experimentTypes "github.com/litmuschaos/litmus-go/pkg/gcp/gcp-vm-disk-loss/types" "github.com/litmuschaos/litmus-go/pkg/log" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "google.golang.org/api/compute/v1" ) @@ -15,7 +16,7 @@ func DiskVolumeDetach(computeService *compute.Service, instanceName string, gcpP response, err := computeService.Instances.DetachDisk(gcpProjectID, zone, instanceName, deviceName).Do() if err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{deviceName: %s, zone: %s}", deviceName, zone), Reason: err.Error()} } log.InfoWithValues("Detaching disk having:", logrus.Fields{ @@ -32,7 +33,7 @@ func DiskVolumeAttach(computeService *compute.Service, instanceName string, gcpP diskDetails, err := computeService.Disks.Get(gcpProjectID, zone, diskName).Do() if err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{diskName: %s, zone: %s}", diskName, zone), Reason: err.Error()} } requestBody := &compute.AttachedDisk{ @@ -42,7 +43,7 @@ func DiskVolumeAttach(computeService *compute.Service, instanceName string, gcpP response, err := computeService.Instances.AttachDisk(gcpProjectID, zone, instanceName, requestBody).Do() if err != nil 
{ - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{diskName: %s, zone: %s}", diskName, zone), Reason: err.Error()} } log.InfoWithValues("Attaching disk having:", logrus.Fields{ @@ -59,7 +60,7 @@ func GetVolumeAttachmentDetails(computeService *compute.Service, gcpProjectID st diskDetails, err := computeService.Disks.Get(gcpProjectID, zone, diskName).Do() if err != nil { - return "", err + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{diskName: %s, zone: %s}", diskName, zone), Reason: err.Error()} } if len(diskDetails.Users) > 0 { @@ -71,7 +72,7 @@ func GetVolumeAttachmentDetails(computeService *compute.Service, gcpProjectID st return attachedInstanceName, nil } - return "", errors.Errorf("%s disk is not attached to any VM instance", diskName) + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{diskName: %s, zone: %s}", diskName, zone), Reason: fmt.Sprintf("%s disk is not attached to any vm instance", diskName)} } // GetDiskDeviceNameForVM returns the device name for the target disk for a given VM @@ -79,7 +80,7 @@ func GetDiskDeviceNameForVM(computeService *compute.Service, targetDiskName, gcp instanceDetails, err := computeService.Instances.Get(gcpProjectID, zone, instanceName).Do() if err != nil { - return "", err + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{diskName: %s, zone: %s}", targetDiskName, zone), Reason: err.Error()} } for _, disk := range instanceDetails.Disks { @@ -94,7 +95,7 @@ func GetDiskDeviceNameForVM(computeService *compute.Service, targetDiskName, gcp } } - return "", errors.Errorf("%s disk not found for %s vm instance", targetDiskName, instanceName) + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{diskName: %s, zone: %s}", targetDiskName, zone), Reason: fmt.Sprintf("%s disk not found for %s vm instance", targetDiskName, instanceName)} } // SetTargetDiskVolumes will select the target disk volumes which are attached to some VM instance and filtered from the given label @@ -113,7 +114,7 @@ func SetTargetDiskVolumes(computeService *compute.Service, experimentsDetails *e response, err = computeService.Disks.List(experimentsDetails.GCPProjectID, experimentsDetails.Zones).Filter("labels." 
+ experimentsDetails.DiskVolumeLabel + ":*").Do() } if err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Target: fmt.Sprintf("{label: %s, zone: %s}", experimentsDetails.DiskVolumeLabel, experimentsDetails.Zones), Reason: err.Error()} } for _, disk := range response.Items { @@ -123,7 +124,7 @@ func SetTargetDiskVolumes(computeService *compute.Service, experimentsDetails *e } if len(experimentsDetails.TargetDiskVolumeNamesList) == 0 { - return errors.Errorf("no attached disk volumes found with the label: %s", experimentsDetails.DiskVolumeLabel) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Target: fmt.Sprintf("{label: %s, zone: %s}", experimentsDetails.DiskVolumeLabel, experimentsDetails.Zones), Reason: "no attached disk volumes found with the given label"} } log.InfoWithValues("[Info]: Targeting the attached disk volumes filtered from disk label", logrus.Fields{ diff --git a/pkg/cloud/gcp/disk-volume-status.go b/pkg/cloud/gcp/disk-volume-status.go index 4c1ecabd9..62ea0e939 100644 --- a/pkg/cloud/gcp/disk-volume-status.go +++ b/pkg/cloud/gcp/disk-volume-status.go @@ -1,13 +1,15 @@ package gcp import ( + "fmt" "strings" "time" + "github.com/litmuschaos/litmus-go/pkg/cerrors" experimentTypes "github.com/litmuschaos/litmus-go/pkg/gcp/gcp-vm-disk-loss/types" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/utils/retry" - "github.com/pkg/errors" + "github.com/palantir/stacktrace" "github.com/sirupsen/logrus" "google.golang.org/api/compute/v1" ) @@ -15,7 +17,7 @@ import ( // WaitForVolumeDetachment will wait for the disk volume to completely detach from a VM instance func WaitForVolumeDetachment(computeService *compute.Service, diskName, gcpProjectID, instanceName, zone string, delay, timeout int) error { - log.Info("[Status]: Checking disk volume status for detachment") + log.Infof("[Status]: Checking %s disk volume status for detachment", diskName) return retry. Times(uint(timeout / delay)). Wait(time.Duration(delay) * time.Second). @@ -23,15 +25,15 @@ func WaitForVolumeDetachment(computeService *compute.Service, diskName, gcpProje volumeState, err := GetDiskVolumeState(computeService, diskName, gcpProjectID, instanceName, zone) if err != nil { - return errors.Errorf("failed to get the volume state") + return stacktrace.Propagate(err, "failed to get the volume state") } if volumeState != "detached" { log.Infof("[Info]: The volume state is %v", volumeState) - return errors.Errorf("volume is not yet in detached state") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{diskName: %s, zone: %s}", diskName, zone), Reason: "volume is not yet in detached state"} } - log.Infof("[Info]: The volume state is %v", volumeState) + log.Infof("[Info]: %s volume state is %v", diskName, volumeState) return nil }) } @@ -39,7 +41,7 @@ func WaitForVolumeDetachment(computeService *compute.Service, diskName, gcpProje // WaitForVolumeAttachment will wait for the disk volume to get attached to a VM instance func WaitForVolumeAttachment(computeService *compute.Service, diskName, gcpProjectID, instanceName, zone string, delay, timeout int) error { - log.Info("[Status]: Checking disk volume status for attachment") + log.Infof("[Status]: Checking %s disk volume status for attachment", diskName) return retry. Times(uint(timeout / delay)). Wait(time.Duration(delay) * time.Second). 
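Note: the structured cerrors.Error values returned above (and throughout this patch) are what the experiments later surface as user-friendly failsteps. A minimal, illustrative sketch of how a consumer might recover the code, target, and reason from such an error; describeFailure is a hypothetical helper, not part of this patch, and it assumes cerrors.Error satisfies the error interface with the exported ErrorCode, Reason, and Target fields used here:

    package failstep

    import (
        "fmt"

        "github.com/litmuschaos/litmus-go/pkg/cerrors"
        "github.com/palantir/stacktrace"
    )

    // describeFailure turns a structured chaos error into a single
    // human-readable failstep line; plain errors fall back to err.Error().
    func describeFailure(err error) string {
        // stacktrace.RootCause strips any stacktrace.Propagate wrapping and
        // returns the underlying error; unwrapped errors are returned as-is.
        if cerr, ok := stacktrace.RootCause(err).(cerrors.Error); ok {
            return fmt.Sprintf("phase=%v target=%v reason=%v", cerr.ErrorCode, cerr.Target, cerr.Reason)
        }
        return err.Error()
    }

Errors that are re-wrapped with stacktrace.Propagate (as in the wait/retry helpers above) keep the original cerrors.Error as their root cause, which is why the sketch unwraps before the type assertion.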
@@ -47,15 +49,15 @@ func WaitForVolumeAttachment(computeService *compute.Service, diskName, gcpProje volumeState, err := GetDiskVolumeState(computeService, diskName, gcpProjectID, instanceName, zone) if err != nil { - return errors.Errorf("failed to get the volume status") + return stacktrace.Propagate(err, "failed to get the volume status") } if volumeState != "attached" { log.Infof("[Info]: The volume state is %v", volumeState) - return errors.Errorf("volume is not yet in attached state") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{diskName: %s, zone: %s}", diskName, zone), Reason: "volume is not yet in attached state"} } - log.Infof("[Info]: The volume state is %v", volumeState) + log.Infof("[Info]: %s volume state is %v", diskName, volumeState) return nil }) } @@ -65,7 +67,7 @@ func GetDiskVolumeState(computeService *compute.Service, diskName, gcpProjectID, diskDetails, err := computeService.Disks.Get(gcpProjectID, zone, diskName).Do() if err != nil { - return "", err + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{diskName: %s, zone: %s}", diskName, zone), Reason: err.Error()} } for _, user := range diskDetails.Users { @@ -100,27 +102,27 @@ func GetDiskVolumeState(computeService *compute.Service, diskName, gcpProjectID, func DiskVolumeStateCheck(computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails) error { if experimentsDetails.GCPProjectID == "" { - return errors.Errorf("no gcp project id provided, please provide the project id") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{projectId: %s}", experimentsDetails.GCPProjectID), Reason: "no gcp project id provided, please provide the project id"} } diskNamesList := strings.Split(experimentsDetails.DiskVolumeNames, ",") if len(diskNamesList) == 0 { - return errors.Errorf("no disk name provided, please provide the name of the disk") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{diskNames: %v}", diskNamesList), Reason: "no disk name provided, please provide the name of the disk"} } zonesList := strings.Split(experimentsDetails.Zones, ",") if len(zonesList) == 0 { - return errors.Errorf("no zone provided, please provide the zone of the disk") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{zones: %v}", zonesList), Reason: "no zone provided, please provide the zone of the disk"} } if len(diskNamesList) != len(zonesList) { - return errors.Errorf("unequal number of disk names and zones found, please verify the input details") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{diskNames: %v, zones: %v}", diskNamesList, zonesList), Reason: "unequal number of disk names and zones found, please verify the input details"} } for i := range diskNamesList { instanceName, err := GetVolumeAttachmentDetails(computeService, experimentsDetails.GCPProjectID, zonesList[i], diskNamesList[i]) if err != nil || instanceName == "" { - return errors.Errorf("failed to get the vm instance name for %s disk volume, err: %v", diskNamesList[i], err) + return stacktrace.Propagate(err, "failed to get the vm instance name for disk volume") } } @@ -136,7 +138,7 @@ func SetTargetDiskInstanceNames(computeService *compute.Service, experimentsDeta for i := range diskNamesList { instanceName, err := GetVolumeAttachmentDetails(computeService, experimentsDetails.GCPProjectID, zonesList[i], diskNamesList[i]) if 
err != nil || instanceName == "" { - return errors.Errorf("failed to get the vm instance name for %s disk volume, err: %v", diskNamesList[i], err) + return stacktrace.Propagate(err, "failed to get the vm instance name for disk volume") } experimentsDetails.TargetDiskInstanceNamesList = append(experimentsDetails.TargetDiskInstanceNamesList, instanceName) diff --git a/pkg/cloud/gcp/get-credentials-json.go b/pkg/cloud/gcp/get-credentials-json.go index af2c6a87b..033b174df 100644 --- a/pkg/cloud/gcp/get-credentials-json.go +++ b/pkg/cloud/gcp/get-credentials-json.go @@ -3,9 +3,11 @@ package gcp import ( "context" "encoding/json" + "fmt" "os" "strings" + "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/log" "google.golang.org/api/compute/v1" "google.golang.org/api/option" @@ -30,7 +32,7 @@ func getFileContent(filePath string) (string, error) { fileContentByteSlice, err := os.ReadFile(filePath) if err != nil { - return "", err + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("failed to read file %s, %s", filePath, err.Error())} } fileContentString := string(fileContentByteSlice) @@ -100,7 +102,7 @@ func getServiceAccountJSONFromSecret() ([]byte, error) { byteSliceJSONString, err := json.Marshal(credentials) if err != nil { - return nil, err + return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("failed to marshal the credentials into json, %s", err.Error())} } return byteSliceJSONString, nil @@ -135,7 +137,7 @@ func GetGCPComputeService() (*compute.Service, error) { // create a new GCP Compute Service client using the GCP service account credentials provided through the secret computeService, err := compute.NewService(ctx, option.WithCredentialsJSON(json)) if err != nil { - return nil, err + return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("failed to authenticate a new compute service using the given credentials, %s", err.Error())} } return computeService, nil @@ -147,7 +149,7 @@ func GetGCPComputeService() (*compute.Service, error) { // create a new GCP Compute Service client using default GCP service account credentials (using Workload Identity) computeService, err := compute.NewService(ctx) if err != nil { - return nil, err + return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("failed to authenticate a new compute service using gke workload identity, %s", err.Error())} } return computeService, nil diff --git a/pkg/cloud/gcp/vm-instance-status.go b/pkg/cloud/gcp/vm-instance-status.go index bcf091cb6..70a782cb5 100644 --- a/pkg/cloud/gcp/vm-instance-status.go +++ b/pkg/cloud/gcp/vm-instance-status.go @@ -1,10 +1,11 @@ package gcp import ( + "fmt" "strings" + "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/log" - "github.com/pkg/errors" "google.golang.org/api/compute/v1" ) @@ -16,7 +17,7 @@ func GetVMInstanceStatus(computeService *compute.Service, instanceName string, g // get information about the requisite VM instance response, err := computeService.Instances.Get(gcpProjectID, instanceZone, instanceName).Do() if err != nil { - return "", err + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{vmName: %s, zone: %s}", instanceName, instanceZone), Reason: err.Error()} } // return the VM status @@ -31,15 +32,15 @@ func InstanceStatusCheckByName(computeService *compute.Service, managedInstanceG instanceZonesList := strings.Split(instanceZones, ",") if 
managedInstanceGroup != "enable" && managedInstanceGroup != "disable" { - return errors.Errorf("invalid value for MANAGED_INSTANCE_GROUP: %v", managedInstanceGroup) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{vmNames: %s, zones: %s}", instanceNamesList, instanceZonesList), Reason: fmt.Sprintf("invalid value for MANAGED_INSTANCE_GROUP: %s", managedInstanceGroup)} } if len(instanceNamesList) == 0 { - return errors.Errorf("no vm instance name found to stop") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{vmNames: %v}", instanceNamesList), Reason: "no vm instance name found to stop"} } if len(instanceNamesList) != len(instanceZonesList) { - return errors.Errorf("the number of vm instance names and the number of regions are not equal") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{vmNames: %v, zones: %v}", instanceNamesList, instanceZonesList), Reason: "unequal number of vm instance names and zones"} } log.Infof("[Info]: The vm instances under chaos (IUC) are: %v", instanceNamesList) @@ -65,7 +66,7 @@ func InstanceStatusCheck(computeService *compute.Service, instanceNamesList []st } if instanceState != "RUNNING" { - return errors.Errorf("%s vm instance is not in RUNNING state, current state: %v", instanceNamesList[i], instanceState) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{vmName: %s, zone: %s}", instanceNamesList[i], zone), Reason: fmt.Sprintf("vm instance is not in RUNNING state, current state: %s", instanceState)} } } diff --git a/pkg/cloud/gcp/vm-operations.go b/pkg/cloud/gcp/vm-operations.go index 5865cb82e..c5d687f2d 100644 --- a/pkg/cloud/gcp/vm-operations.go +++ b/pkg/cloud/gcp/vm-operations.go @@ -1,13 +1,15 @@ package gcp import ( + "fmt" "strings" "time" + "github.com/litmuschaos/litmus-go/pkg/cerrors" experimentTypes "github.com/litmuschaos/litmus-go/pkg/gcp/gcp-vm-instance-stop/types" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/utils/retry" - "github.com/pkg/errors" + "github.com/palantir/stacktrace" "github.com/sirupsen/logrus" "google.golang.org/api/compute/v1" ) @@ -18,7 +20,7 @@ func VMInstanceStop(computeService *compute.Service, instanceName string, gcpPro // stop the requisite VM instance _, err := computeService.Instances.Stop(gcpProjectID, instanceZone, instanceName).Do() if err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{vmName: %s, zone: %s}", instanceName, instanceZone), Reason: err.Error()} } log.InfoWithValues("Stopping VM instance:", logrus.Fields{ @@ -34,7 +36,7 @@ func VMInstanceStart(computeService *compute.Service, instanceName string, gcpPr // start the requisite VM instance _, err := computeService.Instances.Start(gcpProjectID, instanceZone, instanceName).Do() if err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{vmName: %s, zone: %s}", instanceName, instanceZone), Reason: err.Error()} } log.InfoWithValues("Starting VM instance:", logrus.Fields{ @@ -48,7 +50,7 @@ func VMInstanceStart(computeService *compute.Service, instanceName string, gcpPr // WaitForVMInstanceDown will wait for the VM instance to attain the TERMINATED status func WaitForVMInstanceDown(computeService *compute.Service, timeout int, delay int, instanceName string, gcpProjectID string, instanceZone string) error { - log.Info("[Status]: Checking VM instance status") + 
log.Infof("[Status]: Checking %s VM instance status", instanceName) return retry. Times(uint(timeout / delay)). @@ -57,13 +59,13 @@ func WaitForVMInstanceDown(computeService *compute.Service, timeout int, delay i instanceState, err := GetVMInstanceStatus(computeService, instanceName, gcpProjectID, instanceZone) if err != nil { - return errors.Errorf("failed to get the %s vm instance status", instanceName) + return stacktrace.Propagate(err, "failed to get the vm instance status") } log.Infof("The %s vm instance state is %v", instanceName, instanceState) if instanceState != "TERMINATED" { - return errors.Errorf("%s vm instance is not yet in stopped state", instanceName) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{vmName: %s, zone: %s}", instanceName, instanceZone), Reason: "vm instance is not yet in stopped state"} } return nil @@ -73,7 +75,7 @@ func WaitForVMInstanceDown(computeService *compute.Service, timeout int, delay i // WaitForVMInstanceUp will wait for the VM instance to attain the RUNNING status func WaitForVMInstanceUp(computeService *compute.Service, timeout int, delay int, instanceName string, gcpProjectID string, instanceZone string) error { - log.Info("[Status]: Checking VM instance status") + log.Infof("[Status]: Checking %s VM instance status", instanceName) return retry. Times(uint(timeout / delay)). @@ -82,13 +84,13 @@ func WaitForVMInstanceUp(computeService *compute.Service, timeout int, delay int instanceState, err := GetVMInstanceStatus(computeService, instanceName, gcpProjectID, instanceZone) if err != nil { - return errors.Errorf("failed to get the %s vm instance status", instanceName) + return stacktrace.Propagate(err, "failed to get the vm instance status") } log.Infof("The %s vm instance state is %v", instanceName, instanceState) if instanceState != "RUNNING" { - return errors.Errorf("%s vm instance is not yet in running state", instanceName) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{vmName: %s, zone: %s}", instanceName, instanceZone), Reason: "vm instance is not yet in running state"} } return nil @@ -104,7 +106,7 @@ func SetTargetInstance(computeService *compute.Service, experimentsDetails *expe ) if experimentsDetails.InstanceLabel == "" { - return errors.Errorf("label not found, please provide a valid label") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Target: fmt.Sprintf("{label: %s}", experimentsDetails.InstanceLabel), Reason: "label not found, please provide a valid label"} } if strings.Contains(experimentsDetails.InstanceLabel, ":") { @@ -116,6 +118,7 @@ func SetTargetInstance(computeService *compute.Service, experimentsDetails *expe } if err != nil { return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Target: fmt.Sprintf("{label: %s, zone: %s}", experimentsDetails.InstanceLabel, experimentsDetails.Zones), Reason: err.Error()} } for _, instance := range response.Items { @@ -125,7 +128,7 @@ func SetTargetInstance(computeService *compute.Service, experimentsDetails *expe } if len(experimentsDetails.TargetVMInstanceNameList) == 0 { - return errors.Errorf("no RUNNING VM instances found with the label: %s", experimentsDetails.InstanceLabel) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Target: fmt.Sprintf("{label: %s, zone: %s}", experimentsDetails.InstanceLabel, experimentsDetails.Zones), Reason: "no running vm instances found with the given label"} } log.InfoWithValues("[Info]: Targeting the RUNNING VM 
instances filtered from instance label", logrus.Fields{ diff --git a/pkg/cloud/vmware/get-vcenter-cookie.go b/pkg/cloud/vmware/get-vcenter-cookie.go index 596ef8f36..0c9ccf8af 100644 --- a/pkg/cloud/vmware/get-vcenter-cookie.go +++ b/pkg/cloud/vmware/get-vcenter-cookie.go @@ -3,10 +3,11 @@ package vmware import ( "crypto/tls" "encoding/json" - "io/ioutil" + "fmt" + "io" "net/http" - "github.com/pkg/errors" + "github.com/litmuschaos/litmus-go/pkg/cerrors" ) // ErrorResponse contains error response code @@ -18,7 +19,7 @@ type ErrorResponse struct { } `json:"value"` } -//GetVcenterSessionID returns the vcenter sessionid +// GetVcenterSessionID returns the vcenter sessionid func GetVcenterSessionID(vcenterServer, vcenterUser, vcenterPass string) (string, error) { type Cookie struct { @@ -27,7 +28,7 @@ func GetVcenterSessionID(vcenterServer, vcenterUser, vcenterPass string) (string req, err := http.NewRequest("POST", "https://"+vcenterServer+"/rest/com/vmware/cis/session", nil) if err != nil { - return "", err + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("failed to get vcenter session id: %v", err.Error())} } req.Header.Set("Content-Type", "application/json") @@ -39,25 +40,42 @@ func GetVcenterSessionID(vcenterServer, vcenterUser, vcenterPass string) (string client := &http.Client{Transport: tr} resp, err := client.Do(req) if err != nil { - return "", err + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("failed to get vcenter session id: %v", err.Error())} } defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { - return "", err + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("failed to get vcenter session id: %v", err.Error())} } if resp.StatusCode != http.StatusOK { var errorResponse ErrorResponse - json.Unmarshal(body, &errorResponse) - return "", errors.Errorf("error during authentication: %s", errorResponse.MsgValue.MsgMessages[0].MsgDefaultMessage) + var reason string + + err = json.Unmarshal(body, &errorResponse) + if err != nil { + reason = fmt.Sprintf("failed to unmarshal error response: %v", err) + } else { + reason = fmt.Sprintf("error during authentication: %v", errorResponse.MsgValue.MsgMessages[0].MsgDefaultMessage) + } + + return "", cerrors.Error{ + ErrorCode: cerrors.ErrorTypeGeneric, + Reason: reason, + } } var cookie Cookie - json.Unmarshal(body, &cookie) + + if err = json.Unmarshal(body, &cookie); err != nil { + return "", cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: fmt.Sprintf("failed to unmarshal cookie: %v", err), + } + } login := "vmware-api-session-id=" + cookie.MsgValue + ";Path=/rest;Secure;HttpOnly" return login, nil diff --git a/pkg/cloud/vmware/vm-operations.go b/pkg/cloud/vmware/vm-operations.go index 97972bcac..501525a52 100644 --- a/pkg/cloud/vmware/vm-operations.go +++ b/pkg/cloud/vmware/vm-operations.go @@ -3,21 +3,27 @@ package vmware import ( "crypto/tls" "encoding/json" - "io/ioutil" + "fmt" + "io" "net/http" "time" + "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/utils/retry" - "github.com/pkg/errors" + "github.com/palantir/stacktrace" ) -//StartVM starts a given powered-off VM +// StartVM starts a given powered-off VM func StartVM(vcenterServer, vmId, cookie string) error { req, err := http.NewRequest("POST", "https://"+vcenterServer+"/rest/vcenter/vm/"+vmId+"/power/start", nil) if 
err != nil { - return err + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosRevert, + Reason: fmt.Sprintf("failed to start VM: %v", err.Error()), + Target: fmt.Sprintf("{VM ID: %v}", vmId), + } } req.Header.Set("Content-Type", "application/json") @@ -29,32 +35,56 @@ func StartVM(vcenterServer, vmId, cookie string) error { client := &http.Client{Transport: tr} resp, err := client.Do(req) if err != nil { - return err + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosRevert, + Reason: fmt.Sprintf("failed to start VM: %v", err.Error()), + Target: fmt.Sprintf("{VM ID: %v}", vmId), + } } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { - return err + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosRevert, + Reason: fmt.Sprintf("failed to start VM: %v", err.Error()), + Target: fmt.Sprintf("{VM ID: %v}", vmId), + } } var errorResponse ErrorResponse - json.Unmarshal(body, &errorResponse) - return errors.Errorf("failed to start vm: %s", errorResponse.MsgValue.MsgMessages[0].MsgDefaultMessage) + var reason string + + err = json.Unmarshal(body, &errorResponse) + if err != nil { + reason = fmt.Sprintf("failed to unmarshal error response: %v", err) + } else { + reason = fmt.Sprintf("failed to start VM: %v", errorResponse.MsgValue.MsgMessages[0].MsgDefaultMessage) + } + + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosRevert, + Reason: reason, + Target: fmt.Sprintf("{VM ID: %v}", vmId), + } } return nil } -//StopVM stops a given powered-on VM +// StopVM stops a given powered-on VM func StopVM(vcenterServer, vmId, cookie string) error { req, err := http.NewRequest("POST", "https://"+vcenterServer+"/rest/vcenter/vm/"+vmId+"/power/stop", nil) if err != nil { - return err + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosInject, + Reason: fmt.Sprintf("failed to stop VM: %v", err.Error()), + Target: fmt.Sprintf("{VM ID: %v}", vmId), + } } req.Header.Set("Content-Type", "application/json") @@ -66,68 +96,95 @@ func StopVM(vcenterServer, vmId, cookie string) error { client := &http.Client{Transport: tr} resp, err := client.Do(req) if err != nil { - return err + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosInject, + Reason: fmt.Sprintf("failed to stop VM: %v", err.Error()), + Target: fmt.Sprintf("{VM ID: %v}", vmId), + } } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { - return err + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosInject, + Reason: fmt.Sprintf("failed to stop VM: %v", err.Error()), + Target: fmt.Sprintf("{VM ID: %v}", vmId), + } } var errorResponse ErrorResponse - json.Unmarshal(body, &errorResponse) - return errors.Errorf("failed to stop vm: %s", errorResponse.MsgValue.MsgMessages[0].MsgDefaultMessage) + var reason string + + err = json.Unmarshal(body, &errorResponse) + if err != nil { + reason = fmt.Sprintf("failed to unmarshal error response: %v", err) + } else { + reason = fmt.Sprintf("failed to stop VM: %v", errorResponse.MsgValue.MsgMessages[0].MsgDefaultMessage) + } + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosInject, + Reason: reason, + Target: fmt.Sprintf("{VM ID: %v}", vmId), + } } return nil } -//WaitForVMStart waits for the given VM to attain the POWERED_ON state +// WaitForVMStart waits for the given VM to attain the POWERED_ON state func WaitForVMStart(timeout, delay int, vcenterServer, 
vmId, cookie string) error { - log.Infof("[Status]: Checking %s VM status", vmId) + log.Infof("[Status]: Checking %v VM status", vmId) return retry.Times(uint(timeout / delay)). Wait(time.Duration(delay) * time.Second). Try(func(attempt uint) error { vmStatus, err := GetVMStatus(vcenterServer, vmId, cookie) if err != nil { - return errors.Errorf("failed to get %s VM status: %s", vmId, err.Error()) + return stacktrace.Propagate(err, "failed to get VM status") } if vmStatus != "POWERED_ON" { - log.Infof("%s VM state is %s", vmId, vmStatus) - return errors.Errorf("%s vm is not yet in POWERED_ON state", vmId) + log.Infof("%v VM state is %v", vmId, vmStatus) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosRevert, + Reason: "VM is not in POWERED_ON state", + Target: fmt.Sprintf("{VM ID: %v}", vmId), + } } - log.Infof("%s VM state is %s", vmId, vmStatus) + log.Infof("%v VM state is %v", vmId, vmStatus) return nil }) } -//WaitForVMStop waits for the given VM to attain the POWERED_OFF state +// WaitForVMStop waits for the given VM to attain the POWERED_OFF state func WaitForVMStop(timeout, delay int, vcenterServer, vmId, cookie string) error { - log.Infof("[Status]: Checking %s VM status", vmId) + log.Infof("[Status]: Checking %v VM status", vmId) return retry.Times(uint(timeout / delay)). Wait(time.Duration(delay) * time.Second). Try(func(attempt uint) error { vmStatus, err := GetVMStatus(vcenterServer, vmId, cookie) if err != nil { - return errors.Errorf("failed to get %s VM status: %s", vmId, err.Error()) + return stacktrace.Propagate(err, "failed to get VM status") } if vmStatus != "POWERED_OFF" { - log.Infof("%s VM state is %s", vmId, vmStatus) - return errors.Errorf("%s vm is not yet in POWERED_OFF state", vmId) + log.Infof("%v VM state is %v", vmId, vmStatus) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosInject, + Reason: "VM is not in POWERED_OFF state", + Target: fmt.Sprintf("{VM ID: %v}", vmId), + } } - log.Infof("%s VM state is %s", vmId, vmStatus) + log.Infof("%v VM state is %v", vmId, vmStatus) return nil }) } diff --git a/pkg/cloud/vmware/vm-status.go b/pkg/cloud/vmware/vm-status.go index 0a98b4eee..9f5bf52cd 100644 --- a/pkg/cloud/vmware/vm-status.go +++ b/pkg/cloud/vmware/vm-status.go @@ -3,14 +3,16 @@ package vmware import ( "crypto/tls" "encoding/json" - "io/ioutil" + "fmt" + "io" "net/http" "strings" - "github.com/pkg/errors" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/palantir/stacktrace" ) -//GetVMStatus returns the current status of a given VM +// GetVMStatus returns the current status of a given VM func GetVMStatus(vcenterServer, vmId, cookie string) (string, error) { type VMStatus struct { @@ -21,7 +23,10 @@ func GetVMStatus(vcenterServer, vmId, cookie string) (string, error) { req, err := http.NewRequest("GET", "https://"+vcenterServer+"/rest/vcenter/vm/"+vmId+"/power/", nil) if err != nil { - return "", err + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: fmt.Sprintf("failed to get VM status: %v", err.Error()), + Target: fmt.Sprintf("{VM ID: %v}", vmId), + } } req.Header.Set("Content-Type", "application/json") @@ -33,46 +38,79 @@ func GetVMStatus(vcenterServer, vmId, cookie string) (string, error) { client := &http.Client{Transport: tr} resp, err := client.Do(req) if err != nil { - return "", err + return "", cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: fmt.Sprintf("failed to get VM status: %v", err.Error()), + Target: fmt.Sprintf("{VM ID: %v}", vmId), + } } defer 
resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { - return "", err + return "", cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: fmt.Sprintf("failed to get VM status: %v", err.Error()), + Target: fmt.Sprintf("{VM ID: %v}", vmId), + } } if resp.StatusCode != http.StatusOK { var errorResponse ErrorResponse - json.Unmarshal(body, &errorResponse) - return "", errors.Errorf("failed to fetch vm status: %s", errorResponse.MsgValue.MsgMessages[0].MsgDefaultMessage) + var reason string + + err = json.Unmarshal(body, &errorResponse) + if err != nil { + reason = fmt.Sprintf("failed to unmarshal error response: %v", err) + } else { + reason = fmt.Sprintf("failed to fetch VM status: %v", errorResponse.MsgValue.MsgMessages[0].MsgDefaultMessage) + } + + return "", cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: reason, + Target: fmt.Sprintf("{VM ID: %v}", vmId), + } } var vmStatus VMStatus - json.Unmarshal(body, &vmStatus) + if err = json.Unmarshal(body, &vmStatus); err != nil { + return "", cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: fmt.Sprintf("failed to unmarshal VM status: %v", err), + Target: fmt.Sprintf("{VM ID: %v}", vmId), + } + } return vmStatus.MsgValue.MsgState, nil } -//VMStatusCheck validates the steady state for the given vm ids +// VMStatusCheck validates the steady state for the given vm ids func VMStatusCheck(vcenterServer, vmIds, cookie string) error { vmIdList := strings.Split(vmIds, ",") - if len(vmIdList) == 0 { - return errors.Errorf("no vm received, please input the target VMMoids") + if vmIds == "" || len(vmIdList) == 0 { + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: "no VMoid found, please provide target VMMoids", + } } for _, vmId := range vmIdList { vmStatus, err := GetVMStatus(vcenterServer, vmId, cookie) if err != nil { - return errors.Errorf("failed to get status of %s vm: %s", vmId, err.Error()) + return stacktrace.Propagate(err, "failed to get status of VM") } if vmStatus != "POWERED_ON" { - return errors.Errorf("%s vm is not powered-on", vmId) + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeStatusChecks, + Reason: "VM is not in POWERED_ON state", + Target: fmt.Sprintf("{VM ID: %v}", vmId), + } } } diff --git a/pkg/gcp/gcp-vm-disk-loss/environment/environment.go b/pkg/gcp/gcp-vm-disk-loss/environment/environment.go index 854e39188..610dc2d60 100644 --- a/pkg/gcp/gcp-vm-disk-loss/environment/environment.go +++ b/pkg/gcp/gcp-vm-disk-loss/environment/environment.go @@ -17,7 +17,6 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "30")) experimentDetails.ChaosInterval, _ = strconv.Atoi(types.Getenv("CHAOS_INTERVAL", "30")) experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) - experimentDetails.ChaosLib = types.Getenv("LIB", "litmus") experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", "")) experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "") experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "") diff --git a/pkg/gcp/gcp-vm-disk-loss/types/types.go b/pkg/gcp/gcp-vm-disk-loss/types/types.go index c46dba7b1..a41c172de 100644 --- a/pkg/gcp/gcp-vm-disk-loss/types/types.go +++ b/pkg/gcp/gcp-vm-disk-loss/types/types.go @@ -11,7 +11,6 @@ type ExperimentDetails struct { ChaosDuration int ChaosInterval int RampTime int - ChaosLib string ChaosUID 
clientTypes.UID InstanceID string ChaosNamespace string diff --git a/pkg/gcp/gcp-vm-instance-stop/environment/environment.go b/pkg/gcp/gcp-vm-instance-stop/environment/environment.go index 43ce56f25..e8b49a14c 100644 --- a/pkg/gcp/gcp-vm-instance-stop/environment/environment.go +++ b/pkg/gcp/gcp-vm-instance-stop/environment/environment.go @@ -17,7 +17,6 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "30")) experimentDetails.ChaosInterval, _ = strconv.Atoi(types.Getenv("CHAOS_INTERVAL", "30")) experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) - experimentDetails.ChaosLib = types.Getenv("LIB", "litmus") experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", "")) experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "") experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "") diff --git a/pkg/gcp/gcp-vm-instance-stop/types/types.go b/pkg/gcp/gcp-vm-instance-stop/types/types.go index 765178389..d66da0772 100644 --- a/pkg/gcp/gcp-vm-instance-stop/types/types.go +++ b/pkg/gcp/gcp-vm-instance-stop/types/types.go @@ -11,7 +11,6 @@ type ExperimentDetails struct { ChaosDuration int ChaosInterval int RampTime int - ChaosLib string ChaosUID clientTypes.UID InstanceID string ChaosNamespace string diff --git a/pkg/generic/container-kill/environment/environment.go b/pkg/generic/container-kill/environment/environment.go index dbd7fc640..b3b3f1529 100644 --- a/pkg/generic/container-kill/environment/environment.go +++ b/pkg/generic/container-kill/environment/environment.go @@ -17,7 +17,6 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "20")) experimentDetails.ChaosInterval, _ = strconv.Atoi(types.Getenv("CHAOS_INTERVAL", "10")) experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) - experimentDetails.ChaosLib = types.Getenv("LIB", "litmus") experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", "")) experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "") experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "") diff --git a/pkg/generic/container-kill/types/types.go b/pkg/generic/container-kill/types/types.go index 59a95a403..5c882e890 100644 --- a/pkg/generic/container-kill/types/types.go +++ b/pkg/generic/container-kill/types/types.go @@ -11,7 +11,6 @@ type ExperimentDetails struct { ChaosDuration int ChaosInterval int RampTime int - ChaosLib string AppNS string AppLabel string AppKind string diff --git a/pkg/generic/disk-fill/environment/environment.go b/pkg/generic/disk-fill/environment/environment.go index fd31dcc5c..e490648dc 100644 --- a/pkg/generic/disk-fill/environment/environment.go +++ b/pkg/generic/disk-fill/environment/environment.go @@ -16,7 +16,6 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "") experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "60")) experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) - experimentDetails.ChaosLib = types.Getenv("LIB", "litmus") experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", "")) experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "") experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "") diff --git a/pkg/generic/disk-fill/types/types.go 
b/pkg/generic/disk-fill/types/types.go index 0dd0f6bdf..124ed9782 100644 --- a/pkg/generic/disk-fill/types/types.go +++ b/pkg/generic/disk-fill/types/types.go @@ -10,7 +10,6 @@ type ExperimentDetails struct { EngineName string ChaosDuration int RampTime int - ChaosLib string AppNS string AppLabel string AppKind string diff --git a/pkg/generic/docker-service-kill/environment/environment.go b/pkg/generic/docker-service-kill/environment/environment.go index fd64b5346..6e360d9ed 100644 --- a/pkg/generic/docker-service-kill/environment/environment.go +++ b/pkg/generic/docker-service-kill/environment/environment.go @@ -16,7 +16,6 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "") experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "90")) experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) - experimentDetails.ChaosLib = types.Getenv("LIB", "litmus") experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", "")) experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "") experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "") diff --git a/pkg/generic/docker-service-kill/types/types.go b/pkg/generic/docker-service-kill/types/types.go index cb520d0e4..3b3edf2c1 100644 --- a/pkg/generic/docker-service-kill/types/types.go +++ b/pkg/generic/docker-service-kill/types/types.go @@ -10,7 +10,6 @@ type ExperimentDetails struct { EngineName string ChaosDuration int RampTime int - ChaosLib string AppNS string AppLabel string AppKind string diff --git a/pkg/generic/http-chaos/environment/environment.go b/pkg/generic/http-chaos/environment/environment.go index 5b3c9f7b8..1685e3920 100644 --- a/pkg/generic/http-chaos/environment/environment.go +++ b/pkg/generic/http-chaos/environment/environment.go @@ -15,7 +15,6 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails, expName string experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "") experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "60")) experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) - experimentDetails.ChaosLib = types.Getenv("LIB", "litmus") experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", "")) experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "") experimentDetails.LIBImage = types.Getenv("LIB_IMAGE", "litmuschaos/go-runner:latest") diff --git a/pkg/generic/http-chaos/types/types.go b/pkg/generic/http-chaos/types/types.go index 9b4b2f121..c0653168b 100644 --- a/pkg/generic/http-chaos/types/types.go +++ b/pkg/generic/http-chaos/types/types.go @@ -12,7 +12,6 @@ type ExperimentDetails struct { LIBImage string LIBImagePullPolicy string RampTime int - ChaosLib string AppNS string AppLabel string AppKind string diff --git a/pkg/generic/kubelet-service-kill/environment/environment.go b/pkg/generic/kubelet-service-kill/environment/environment.go index 0fecf1a7a..6d524fa49 100644 --- a/pkg/generic/kubelet-service-kill/environment/environment.go +++ b/pkg/generic/kubelet-service-kill/environment/environment.go @@ -16,7 +16,6 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "") experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "90")) experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) - experimentDetails.ChaosLib = types.Getenv("LIB", "litmus") 
experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", "")) experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "") experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "") diff --git a/pkg/generic/kubelet-service-kill/types/types.go b/pkg/generic/kubelet-service-kill/types/types.go index cb520d0e4..3b3edf2c1 100644 --- a/pkg/generic/kubelet-service-kill/types/types.go +++ b/pkg/generic/kubelet-service-kill/types/types.go @@ -10,7 +10,6 @@ type ExperimentDetails struct { EngineName string ChaosDuration int RampTime int - ChaosLib string AppNS string AppLabel string AppKind string diff --git a/pkg/generic/network-chaos/environment/environment.go b/pkg/generic/network-chaos/environment/environment.go index 71db254eb..d98ef50c4 100644 --- a/pkg/generic/network-chaos/environment/environment.go +++ b/pkg/generic/network-chaos/environment/environment.go @@ -15,7 +15,6 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails, expName string experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "") experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "60")) experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) - experimentDetails.ChaosLib = types.Getenv("LIB", "litmus") experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", "")) experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "") experimentDetails.LIBImage = types.Getenv("LIB_IMAGE", "litmuschaos/go-runner:latest") @@ -23,7 +22,6 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails, expName string experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "") experimentDetails.NetworkInterface = types.Getenv("NETWORK_INTERFACE", "eth0") experimentDetails.TargetContainer = types.Getenv("TARGET_CONTAINER", "") - experimentDetails.TCImage = types.Getenv("TC_IMAGE", "gaiadocker/iproute2") experimentDetails.Delay, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_DELAY", "2")) experimentDetails.Timeout, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_TIMEOUT", "180")) experimentDetails.TargetPods = types.Getenv("TARGET_PODS", "") diff --git a/pkg/generic/network-chaos/types/types.go b/pkg/generic/network-chaos/types/types.go index 08ed85f5f..b76e9c1de 100644 --- a/pkg/generic/network-chaos/types/types.go +++ b/pkg/generic/network-chaos/types/types.go @@ -12,7 +12,6 @@ type ExperimentDetails struct { LIBImage string LIBImagePullPolicy string RampTime int - ChaosLib string AppNS string AppLabel string AppKind string @@ -27,7 +26,6 @@ type ExperimentDetails struct { NetworkLatency int NetworkPacketLossPercentage string NetworkPacketCorruptionPercentage string - TCImage string Timeout int Delay int TargetPods string diff --git a/pkg/generic/node-cpu-hog/environment/environment.go b/pkg/generic/node-cpu-hog/environment/environment.go index e8e2d9bb8..75e0b6d9f 100644 --- a/pkg/generic/node-cpu-hog/environment/environment.go +++ b/pkg/generic/node-cpu-hog/environment/environment.go @@ -15,7 +15,6 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "") experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "30")) experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) - experimentDetails.ChaosLib = types.Getenv("LIB", "litmus") experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", "")) experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "") experimentDetails.ChaosPodName = 
types.Getenv("POD_NAME", "") diff --git a/pkg/generic/node-cpu-hog/types/types.go b/pkg/generic/node-cpu-hog/types/types.go index 3982d39aa..08ab69448 100644 --- a/pkg/generic/node-cpu-hog/types/types.go +++ b/pkg/generic/node-cpu-hog/types/types.go @@ -10,7 +10,6 @@ type ExperimentDetails struct { EngineName string ChaosDuration int RampTime int - ChaosLib string ChaosUID clientTypes.UID TerminationGracePeriodSeconds int InstanceID string diff --git a/pkg/generic/node-drain/environment/environment.go b/pkg/generic/node-drain/environment/environment.go index b9287fb61..46065ba1e 100644 --- a/pkg/generic/node-drain/environment/environment.go +++ b/pkg/generic/node-drain/environment/environment.go @@ -16,7 +16,6 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "") experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "60")) experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) - experimentDetails.ChaosLib = types.Getenv("LIB", "litmus") experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", "")) experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "") experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "") diff --git a/pkg/generic/node-drain/types/types.go b/pkg/generic/node-drain/types/types.go index 722cc7f82..dc9049835 100644 --- a/pkg/generic/node-drain/types/types.go +++ b/pkg/generic/node-drain/types/types.go @@ -10,7 +10,6 @@ type ExperimentDetails struct { EngineName string ChaosDuration int RampTime int - ChaosLib string AppNS string AppLabel string AppKind string diff --git a/pkg/generic/node-io-stress/environment/environment.go b/pkg/generic/node-io-stress/environment/environment.go index c9e21209f..ad229c360 100644 --- a/pkg/generic/node-io-stress/environment/environment.go +++ b/pkg/generic/node-io-stress/environment/environment.go @@ -15,7 +15,6 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "") experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "120")) experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) - experimentDetails.ChaosLib = types.Getenv("LIB", "litmus") experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", "")) experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "") experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "") diff --git a/pkg/generic/node-io-stress/types/types.go b/pkg/generic/node-io-stress/types/types.go index 97ed5aa12..9c784b10e 100644 --- a/pkg/generic/node-io-stress/types/types.go +++ b/pkg/generic/node-io-stress/types/types.go @@ -10,7 +10,6 @@ type ExperimentDetails struct { EngineName string ChaosDuration int RampTime int - ChaosLib string ChaosUID clientTypes.UID InstanceID string TerminationGracePeriodSeconds int diff --git a/pkg/generic/node-memory-hog/environment/environment.go b/pkg/generic/node-memory-hog/environment/environment.go index 56a9798f0..d914351a5 100644 --- a/pkg/generic/node-memory-hog/environment/environment.go +++ b/pkg/generic/node-memory-hog/environment/environment.go @@ -15,7 +15,6 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "") experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "60")) experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) - 
experimentDetails.ChaosLib = types.Getenv("LIB", "litmus") experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", "")) experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "") experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "") diff --git a/pkg/generic/node-memory-hog/types/types.go b/pkg/generic/node-memory-hog/types/types.go index acbd75f4f..065b943c6 100644 --- a/pkg/generic/node-memory-hog/types/types.go +++ b/pkg/generic/node-memory-hog/types/types.go @@ -10,7 +10,6 @@ type ExperimentDetails struct { EngineName string ChaosDuration int RampTime int - ChaosLib string ChaosUID clientTypes.UID TerminationGracePeriodSeconds int InstanceID string diff --git a/pkg/generic/node-restart/environment/environment.go b/pkg/generic/node-restart/environment/environment.go index d78e2bd2a..54fb4222c 100644 --- a/pkg/generic/node-restart/environment/environment.go +++ b/pkg/generic/node-restart/environment/environment.go @@ -15,7 +15,6 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "") experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "30")) experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) - experimentDetails.ChaosLib = types.Getenv("LIB", "litmus") experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", "")) experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "") experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "") diff --git a/pkg/generic/node-restart/types/types.go b/pkg/generic/node-restart/types/types.go index 89d0dd27f..82c8e2d04 100644 --- a/pkg/generic/node-restart/types/types.go +++ b/pkg/generic/node-restart/types/types.go @@ -10,7 +10,6 @@ type ExperimentDetails struct { EngineName string ChaosDuration int RampTime int - ChaosLib string AppNS string AppLabel string AppKind string diff --git a/pkg/generic/node-taint/environment/environment.go b/pkg/generic/node-taint/environment/environment.go index 209b5c5af..c04183c92 100644 --- a/pkg/generic/node-taint/environment/environment.go +++ b/pkg/generic/node-taint/environment/environment.go @@ -16,7 +16,6 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "60")) experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "") experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) - experimentDetails.ChaosLib = types.Getenv("LIB", "litmus") experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", "")) experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "") experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "") diff --git a/pkg/generic/node-taint/types/types.go b/pkg/generic/node-taint/types/types.go index 9011f5f15..2fa69b4a0 100644 --- a/pkg/generic/node-taint/types/types.go +++ b/pkg/generic/node-taint/types/types.go @@ -10,7 +10,6 @@ type ExperimentDetails struct { EngineName string RampTime int ChaosDuration int - ChaosLib string AppNS string AppLabel string AppKind string diff --git a/pkg/generic/pod-autoscaler/environment/environment.go b/pkg/generic/pod-autoscaler/environment/environment.go index 8be42250a..1d80d5f2c 100644 --- a/pkg/generic/pod-autoscaler/environment/environment.go +++ b/pkg/generic/pod-autoscaler/environment/environment.go @@ -16,7 +16,6 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.EngineName = 
types.Getenv("CHAOSENGINE", "") experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "60")) experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) - experimentDetails.ChaosLib = types.Getenv("LIB", "litmus") experimentDetails.AppAffectPercentage, _ = strconv.Atoi(types.Getenv("APP_AFFECT_PERC", "100")) experimentDetails.Replicas, _ = strconv.Atoi(types.Getenv("REPLICA_COUNT", "")) experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", "")) diff --git a/pkg/generic/pod-autoscaler/types/types.go b/pkg/generic/pod-autoscaler/types/types.go index 34b63945e..34cab9629 100644 --- a/pkg/generic/pod-autoscaler/types/types.go +++ b/pkg/generic/pod-autoscaler/types/types.go @@ -11,7 +11,6 @@ type ExperimentDetails struct { ChaosDuration int RampTime int Replicas int - ChaosLib string AppNS string AppLabel string AppKind string diff --git a/pkg/generic/pod-cpu-hog-exec/environment/environment.go b/pkg/generic/pod-cpu-hog-exec/environment/environment.go index b536c76f5..3619db2bd 100644 --- a/pkg/generic/pod-cpu-hog-exec/environment/environment.go +++ b/pkg/generic/pod-cpu-hog-exec/environment/environment.go @@ -16,7 +16,6 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "60")) experimentDetails.ChaosInterval, _ = strconv.Atoi(types.Getenv("CHAOS_INTERVAL", "10")) experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) - experimentDetails.ChaosLib = types.Getenv("LIB", "litmus") experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", "")) experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "") experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "") diff --git a/pkg/generic/pod-cpu-hog-exec/types/types.go b/pkg/generic/pod-cpu-hog-exec/types/types.go index fc69cedf3..0b4cf34f2 100644 --- a/pkg/generic/pod-cpu-hog-exec/types/types.go +++ b/pkg/generic/pod-cpu-hog-exec/types/types.go @@ -12,7 +12,6 @@ type ExperimentDetails struct { ChaosDuration int ChaosInterval int RampTime int - ChaosLib string ChaosUID clientTypes.UID InstanceID string ChaosNamespace string diff --git a/pkg/generic/pod-delete/environment/environment.go b/pkg/generic/pod-delete/environment/environment.go index ad4dc4417..dce477981 100644 --- a/pkg/generic/pod-delete/environment/environment.go +++ b/pkg/generic/pod-delete/environment/environment.go @@ -17,7 +17,6 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "30")) experimentDetails.ChaosInterval = types.Getenv("CHAOS_INTERVAL", "10") experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) - experimentDetails.ChaosLib = types.Getenv("LIB", "litmus") experimentDetails.ChaosServiceAccount = types.Getenv("CHAOS_SERVICE_ACCOUNT", "") experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", "")) experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "") diff --git a/pkg/generic/pod-delete/types/types.go b/pkg/generic/pod-delete/types/types.go index aa6cddc5f..ba95bcd57 100644 --- a/pkg/generic/pod-delete/types/types.go +++ b/pkg/generic/pod-delete/types/types.go @@ -12,7 +12,6 @@ type ExperimentDetails struct { ChaosInterval string RampTime int Force bool - ChaosLib string ChaosServiceAccount string AppNS string AppLabel string diff --git a/pkg/generic/pod-dns-chaos/environment/environment.go 
b/pkg/generic/pod-dns-chaos/environment/environment.go index 92433c677..a652c4c9b 100644 --- a/pkg/generic/pod-dns-chaos/environment/environment.go +++ b/pkg/generic/pod-dns-chaos/environment/environment.go @@ -24,7 +24,6 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails, expType DNSCha experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "") experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "60")) experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) - experimentDetails.ChaosLib = types.Getenv("LIB", "litmus") experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", "")) experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "") experimentDetails.LIBImage = types.Getenv("LIB_IMAGE", "litmuschaos/go-runner:latest") diff --git a/pkg/generic/pod-dns-chaos/types/types.go b/pkg/generic/pod-dns-chaos/types/types.go index 80fd59498..883d11fe3 100644 --- a/pkg/generic/pod-dns-chaos/types/types.go +++ b/pkg/generic/pod-dns-chaos/types/types.go @@ -12,7 +12,6 @@ type ExperimentDetails struct { LIBImage string LIBImagePullPolicy string RampTime int - ChaosLib string AppNS string AppLabel string AppKind string diff --git a/pkg/generic/pod-fio-stress/environment/environment.go b/pkg/generic/pod-fio-stress/environment/environment.go index 19102bc7b..9c2140c64 100644 --- a/pkg/generic/pod-fio-stress/environment/environment.go +++ b/pkg/generic/pod-fio-stress/environment/environment.go @@ -17,7 +17,6 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "30")) experimentDetails.ChaosInterval, _ = strconv.Atoi(types.Getenv("CHAOS_INTERVAL", "10")) experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) - experimentDetails.ChaosLib = types.Getenv("LIB", "litmus") experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", "")) experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "") experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "") diff --git a/pkg/generic/pod-fio-stress/types/types.go b/pkg/generic/pod-fio-stress/types/types.go index 34d8c0142..b37755454 100644 --- a/pkg/generic/pod-fio-stress/types/types.go +++ b/pkg/generic/pod-fio-stress/types/types.go @@ -11,7 +11,6 @@ type ExperimentDetails struct { ChaosDuration int ChaosInterval int RampTime int - ChaosLib string ChaosUID clientTypes.UID InstanceID string ChaosNamespace string diff --git a/pkg/generic/pod-memory-hog-exec/environment/environment.go b/pkg/generic/pod-memory-hog-exec/environment/environment.go index fd46f9f58..4cfc97834 100644 --- a/pkg/generic/pod-memory-hog-exec/environment/environment.go +++ b/pkg/generic/pod-memory-hog-exec/environment/environment.go @@ -16,7 +16,6 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "30")) experimentDetails.ChaosInterval, _ = strconv.Atoi(types.Getenv("CHAOS_INTERVAL", "10")) experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) - experimentDetails.ChaosLib = types.Getenv("LIB", "litmus") experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", "")) experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "") experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "") diff --git a/pkg/generic/pod-memory-hog-exec/types/types.go b/pkg/generic/pod-memory-hog-exec/types/types.go index 5e6ade740..43075feeb 
100644 --- a/pkg/generic/pod-memory-hog-exec/types/types.go +++ b/pkg/generic/pod-memory-hog-exec/types/types.go @@ -12,7 +12,6 @@ type ExperimentDetails struct { ChaosDuration int ChaosInterval int RampTime int - ChaosLib string ChaosUID clientTypes.UID InstanceID string ChaosNamespace string diff --git a/pkg/generic/pod-network-partition/environment/environment.go b/pkg/generic/pod-network-partition/environment/environment.go index 4e11d3919..a6b456696 100644 --- a/pkg/generic/pod-network-partition/environment/environment.go +++ b/pkg/generic/pod-network-partition/environment/environment.go @@ -16,7 +16,6 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "") experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "30")) experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) - experimentDetails.ChaosLib = types.Getenv("LIB", "litmus") experimentDetails.AppNS = types.Getenv("APP_NAMESPACE", "") experimentDetails.AppLabel = types.Getenv("APP_LABEL", "") experimentDetails.AppKind = types.Getenv("APP_KIND", "") diff --git a/pkg/generic/pod-network-partition/types/types.go b/pkg/generic/pod-network-partition/types/types.go index 09f761987..9e152f824 100644 --- a/pkg/generic/pod-network-partition/types/types.go +++ b/pkg/generic/pod-network-partition/types/types.go @@ -10,7 +10,6 @@ type ExperimentDetails struct { EngineName string ChaosDuration int RampTime int - ChaosLib string AppNS string AppLabel string AppKind string diff --git a/pkg/generic/stress-chaos/environment/environment.go b/pkg/generic/stress-chaos/environment/environment.go index 6c880a44a..28ef58d5a 100644 --- a/pkg/generic/stress-chaos/environment/environment.go +++ b/pkg/generic/stress-chaos/environment/environment.go @@ -15,7 +15,6 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails, expName string experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "") experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "60")) experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) - experimentDetails.ChaosLib = types.Getenv("LIB", "litmus") experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", "")) experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "") experimentDetails.LIBImage = types.Getenv("LIB_IMAGE", "litmuschaos/go-runner:latest") @@ -31,7 +30,6 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails, expName string experimentDetails.SocketPath = types.Getenv("SOCKET_PATH", "/var/run/docker.sock") experimentDetails.Sequence = types.Getenv("SEQUENCE", "parallel") experimentDetails.TerminationGracePeriodSeconds, _ = strconv.Atoi(types.Getenv("TERMINATION_GRACE_PERIOD_SECONDS", "")) - experimentDetails.StressImage = types.Getenv("STRESS_IMAGE", "alexeiled/stress-ng:latest-ubuntu") experimentDetails.NodeLabel = types.Getenv("NODE_LABEL", "") experimentDetails.SetHelperData = types.Getenv("SET_HELPER_DATA", "true") diff --git a/pkg/generic/stress-chaos/types/types.go b/pkg/generic/stress-chaos/types/types.go index ffb85aa63..25d1e89f9 100644 --- a/pkg/generic/stress-chaos/types/types.go +++ b/pkg/generic/stress-chaos/types/types.go @@ -12,7 +12,6 @@ type ExperimentDetails struct { LIBImage string LIBImagePullPolicy string RampTime int - ChaosLib string AppNS string AppLabel string AppKind string @@ -22,7 +21,6 @@ type ExperimentDetails struct { ChaosPodName string RunID string TargetContainer 
string - StressImage string Timeout int Delay int TargetPods string diff --git a/pkg/kafka/environment/environment.go b/pkg/kafka/environment/environment.go index 88b558963..0aca16c69 100644 --- a/pkg/kafka/environment/environment.go +++ b/pkg/kafka/environment/environment.go @@ -9,7 +9,7 @@ import ( clientTypes "k8s.io/apimachinery/pkg/types" ) -//GetENV fetches all the env variables from the runner pod +// GetENV fetches all the env variables from the runner pod func GetENV(kafkaDetails *kafkaTypes.ExperimentDetails) { var ChaoslibDetail exp.ExperimentDetails @@ -20,7 +20,6 @@ func GetENV(kafkaDetails *kafkaTypes.ExperimentDetails) { ChaoslibDetail.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "60")) ChaoslibDetail.ChaosInterval = types.Getenv("CHAOS_INTERVAL", "10") ChaoslibDetail.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) - ChaoslibDetail.ChaosLib = types.Getenv("LIB", "litmus") ChaoslibDetail.ChaosServiceAccount = types.Getenv("CHAOS_SERVICE_ACCOUNT", "") ChaoslibDetail.TargetContainer = types.Getenv("TARGET_CONTAINER", "") ChaoslibDetail.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", "")) @@ -39,7 +38,7 @@ func GetENV(kafkaDetails *kafkaTypes.ExperimentDetails) { kafkaDetails.KafkaLivenessStream = types.Getenv("KAFKA_LIVENESS_STREAM", "enable") kafkaDetails.KafkaLivenessImage = types.Getenv("KAFKA_LIVENESS_IMAGE", "litmuschaos/kafka-client:latest") kafkaDetails.KafkaConsumerTimeout, _ = strconv.Atoi(types.Getenv("KAFKA_CONSUMER_TIMEOUT", "60000")) - kafkaDetails.KafkaInstanceName = types.Getenv("KAFKA_INSTANCE_NAME", "kafka") + kafkaDetails.KafkaInstanceName = types.Getenv("KAFKA_INSTANCE_NAME", "") kafkaDetails.KafkaNamespace = types.Getenv("KAFKA_NAMESPACE", "default") kafkaDetails.KafkaLabel = types.Getenv("KAFKA_LABEL", "") kafkaDetails.KafkaBroker = types.Getenv("KAFKA_BROKER", "") diff --git a/pkg/kafka/kafka-liveness-cleanup.go b/pkg/kafka/kafka-liveness-cleanup.go index d01e4f31c..79a71e68d 100644 --- a/pkg/kafka/kafka-liveness-cleanup.go +++ b/pkg/kafka/kafka-liveness-cleanup.go @@ -2,12 +2,13 @@ package kafka import ( "context" + "fmt" "time" + "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/clients" experimentTypes "github.com/litmuschaos/litmus-go/pkg/kafka/types" "github.com/litmuschaos/litmus-go/pkg/utils/retry" - "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -15,7 +16,7 @@ import ( func LivenessCleanup(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets) error { if err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaoslibDetail.AppNS).Delete(context.Background(), "kafka-liveness-"+experimentsDetails.RunID, metav1.DeleteOptions{}); err != nil { - return errors.Errorf("Fail to delete liveness deployment, err: %v", err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Reason: fmt.Sprintf("fail to delete liveness deployment, %s", err.Error())} } return retry. 
@@ -24,9 +25,9 @@ func LivenessCleanup(experimentsDetails *experimentTypes.ExperimentDetails, clie Try(func(attempt uint) error { podSpec, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaoslibDetail.AppNS).List(context.Background(), metav1.ListOptions{LabelSelector: "name=kafka-liveness-" + experimentsDetails.RunID}) if err != nil { - return errors.Errorf("Liveness pod is not deleted yet, err: %v", err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Reason: fmt.Sprintf("liveness pod is not deleted yet, %s", err.Error())} } else if len(podSpec.Items) != 0 { - return errors.Errorf("Liveness pod is not deleted yet") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Reason: "liveness pod is not deleted yet"} } return nil }) diff --git a/pkg/kafka/kafka-liveness-stream.go b/pkg/kafka/kafka-liveness-stream.go index af2913100..636480876 100644 --- a/pkg/kafka/kafka-liveness-stream.go +++ b/pkg/kafka/kafka-liveness-stream.go @@ -2,16 +2,17 @@ package kafka import ( "context" + "fmt" "strconv" "strings" + "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/clients" experimentTypes "github.com/litmuschaos/litmus-go/pkg/kafka/types" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/status" "github.com/litmuschaos/litmus-go/pkg/utils/common" litmusexec "github.com/litmuschaos/litmus-go/pkg/utils/exec" - "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -34,7 +35,7 @@ func LivenessStream(experimentsDetails *experimentTypes.ExperimentDetails, clien log.Info("[Liveness]: Confirm that the kafka liveness pod is running") if err := status.CheckApplicationStatusesByLabels(experimentsDetails.KafkaNamespace, "name=kafka-liveness-"+experimentsDetails.RunID, experimentsDetails.ChaoslibDetail.Timeout, experimentsDetails.ChaoslibDetail.Delay, clients); err != nil { - return "", errors.Errorf("liveness pod status check failed, err: %v", err) + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Reason: fmt.Sprintf("liveness pod status check failed, err: %v", err)} } log.Info("[Liveness]: Obtain the leader broker ordinality for the topic (partition) created by kafka-liveness") @@ -45,7 +46,7 @@ func LivenessStream(experimentsDetails *experimentTypes.ExperimentDetails, clien litmusexec.SetExecCommandAttributes(&execCommandDetails, "kafka-liveness-"+experimentsDetails.RunID, "kafka-consumer", experimentsDetails.KafkaNamespace) ordinality, err = litmusexec.Exec(&execCommandDetails, clients, command) if err != nil { - return "", errors.Errorf("unable to get ordinality details, err: %v", err) + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Reason: fmt.Sprintf("unable to get ordinality details, err: %v", err)} } } else { // It will contains all the pod & container details required for exec command @@ -55,14 +56,14 @@ func LivenessStream(experimentsDetails *experimentTypes.ExperimentDetails, clien litmusexec.SetExecCommandAttributes(&execCommandDetails, "kafka-liveness-"+experimentsDetails.RunID, "kafka-consumer", experimentsDetails.KafkaNamespace) ordinality, err = litmusexec.Exec(&execCommandDetails, clients, command) if err != nil { - return "", errors.Errorf("unable to get ordinality details, err: %v", err) + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Reason: fmt.Sprintf("unable to get ordinality details, err: %v", err)} } } log.Info("[Liveness]: Determine the leader broker pod name") podList, err := 
clients.KubeClient.CoreV1().Pods(experimentsDetails.KafkaNamespace).List(context.Background(), metav1.ListOptions{LabelSelector: experimentsDetails.KafkaLabel}) if err != nil { - return "", errors.Errorf("unable to find the pods with matching labels, err: %v", err) + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Reason: fmt.Sprintf("unable to find the pods with matching labels, err: %v", err)} } for _, pod := range podList.Items { @@ -71,7 +72,7 @@ func LivenessStream(experimentsDetails *experimentTypes.ExperimentDetails, clien } } - return "", errors.Errorf("no kafka pod found with %v ordinality", ordinality) + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Reason: fmt.Sprintf("no kafka pod found with %v ordinality", ordinality)} } // CreateLivenessPod creates the kafka liveness pod @@ -185,7 +186,7 @@ func CreateLivenessPod(experimentsDetails *experimentTypes.ExperimentDetails, Ka _, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.KafkaNamespace).Create(context.Background(), LivenessPod, metav1.CreateOptions{}) if err != nil { - return errors.Errorf("unable to create Liveness pod, err: %v", err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Reason: fmt.Sprintf("unable to create liveness pod, err: %v", err)} } return nil } diff --git a/pkg/kube-aws/ebs-loss/environment/environment.go b/pkg/kube-aws/ebs-loss/environment/environment.go index bb6d60d7e..b29d7bd9a 100644 --- a/pkg/kube-aws/ebs-loss/environment/environment.go +++ b/pkg/kube-aws/ebs-loss/environment/environment.go @@ -2,6 +2,7 @@ package environment import ( "strconv" + "strings" clientTypes "k8s.io/apimachinery/pkg/types" @@ -9,7 +10,7 @@ import ( "github.com/litmuschaos/litmus-go/pkg/types" ) -//GetENV fetches all the env variables from the runner pod +// GetENV fetches all the env variables from the runner pod func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { // setting default value for EXPERIMENT_NAME to "" as this is a common util for the ebs-loss-byid/tag experiments experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "") @@ -18,17 +19,14 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "30")) experimentDetails.ChaosInterval, _ = strconv.Atoi(types.Getenv("CHAOS_INTERVAL", "30")) experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) - experimentDetails.ChaosLib = types.Getenv("LIB", "litmus") experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", "")) experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "") experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "") - experimentDetails.AuxiliaryAppInfo = types.Getenv("AUXILIARY_APPINFO", "") experimentDetails.Delay, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_DELAY", "2")) experimentDetails.Timeout, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_TIMEOUT", "180")) - experimentDetails.EBSVolumeID = types.Getenv("EBS_VOLUME_ID", "") + experimentDetails.EBSVolumeID = strings.TrimSpace(types.Getenv("EBS_VOLUME_ID", "")) experimentDetails.VolumeTag = types.Getenv("EBS_VOLUME_TAG", "") experimentDetails.Region = types.Getenv("REGION", "") experimentDetails.Sequence = types.Getenv("SEQUENCE", "parallel") experimentDetails.VolumeAffectedPerc, _ = strconv.Atoi(types.Getenv("VOLUME_AFFECTED_PERC", "0")) - experimentDetails.TargetContainer = types.Getenv("TARGET_CONTAINER", "") } diff --git a/pkg/kube-aws/ebs-loss/types/types.go 
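
Alongside the lib cleanup, the cloud experiments start sanitising user-supplied resource IDs: EBS_VOLUME_ID above, and EC2_INSTANCE_ID and INSTANCE_TAG in the hunks that follow, are wrapped in strings.TrimSpace so stray whitespace copied into the ChaosEngine env no longer causes lookups for a resource that does not exist. A tiny runnable illustration of the effect (the volume ID below is made up):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// A trailing newline pasted into the env value used to be passed through verbatim.
	raw := "vol-0f1c2d3e4a5b6c7d8\n"
	fmt.Printf("%q\n", strings.TrimSpace(raw)) // "vol-0f1c2d3e4a5b6c7d8"
}
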
b/pkg/kube-aws/ebs-loss/types/types.go index 0b672ab7e..f694a9ec4 100644 --- a/pkg/kube-aws/ebs-loss/types/types.go +++ b/pkg/kube-aws/ebs-loss/types/types.go @@ -11,7 +11,6 @@ type ExperimentDetails struct { ChaosDuration int ChaosInterval int RampTime int - ChaosLib string AppNS string AppLabel string AppKind string @@ -19,14 +18,11 @@ type ExperimentDetails struct { InstanceID string ChaosNamespace string ChaosPodName string - AuxiliaryAppInfo string Timeout int Delay int Sequence string VolumeTag string Region string - LIBImagePullPolicy string - TargetContainer string VolumeAffectedPerc int EBSVolumeID string TargetVolumeIDList []string diff --git a/pkg/kube-aws/ec2-terminate-by-id/environment/environment.go b/pkg/kube-aws/ec2-terminate-by-id/environment/environment.go index b74c19f81..e6f3d702b 100644 --- a/pkg/kube-aws/ec2-terminate-by-id/environment/environment.go +++ b/pkg/kube-aws/ec2-terminate-by-id/environment/environment.go @@ -2,6 +2,7 @@ package environment import ( "strconv" + "strings" clientTypes "k8s.io/apimachinery/pkg/types" @@ -9,24 +10,21 @@ import ( "github.com/litmuschaos/litmus-go/pkg/types" ) -//GetENV fetches all the env variables from the runner pod +// GetENV fetches all the env variables from the runner pod func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "ec2-terminate-by-id") experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus") experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "") - experimentDetails.AuxiliaryAppInfo = types.Getenv("AUXILIARY_APPINFO", "") experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "30")) experimentDetails.ChaosInterval, _ = strconv.Atoi(types.Getenv("CHAOS_INTERVAL", "30")) experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) - experimentDetails.ChaosLib = types.Getenv("LIB", "litmus") experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", "")) experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "") experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "") experimentDetails.Delay, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_DELAY", "2")) experimentDetails.Timeout, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_TIMEOUT", "180")) - experimentDetails.Ec2InstanceID = types.Getenv("EC2_INSTANCE_ID", "") + experimentDetails.Ec2InstanceID = strings.TrimSpace(types.Getenv("EC2_INSTANCE_ID", "")) experimentDetails.Region = types.Getenv("REGION", "") experimentDetails.ManagedNodegroup = types.Getenv("MANAGED_NODEGROUP", "disable") experimentDetails.Sequence = types.Getenv("SEQUENCE", "parallel") - experimentDetails.TargetContainer = types.Getenv("TARGET_CONTAINER", "") } diff --git a/pkg/kube-aws/ec2-terminate-by-id/types/types.go b/pkg/kube-aws/ec2-terminate-by-id/types/types.go index 188035e20..1440bc134 100644 --- a/pkg/kube-aws/ec2-terminate-by-id/types/types.go +++ b/pkg/kube-aws/ec2-terminate-by-id/types/types.go @@ -6,24 +6,20 @@ import ( // ExperimentDetails is for collecting all the experiment-related details type ExperimentDetails struct { - ExperimentName string - EngineName string - RampTime int - AuxiliaryAppInfo string - ChaosLib string - ChaosDuration int - ChaosInterval int - ChaosUID clientTypes.UID - InstanceID string - ChaosNamespace string - ChaosPodName string - Timeout int - Delay int - Ec2InstanceID string - Region string - ManagedNodegroup string - Sequence string - ActiveNodes int - LIBImagePullPolicy string - 
TargetContainer string + ExperimentName string + EngineName string + RampTime int + ChaosDuration int + ChaosInterval int + ChaosUID clientTypes.UID + InstanceID string + ChaosNamespace string + ChaosPodName string + Timeout int + Delay int + Ec2InstanceID string + Region string + ManagedNodegroup string + Sequence string + ActiveNodes int } diff --git a/pkg/kube-aws/ec2-terminate-by-tag/environment/environment.go b/pkg/kube-aws/ec2-terminate-by-tag/environment/environment.go index ebdecca14..3c40d710b 100644 --- a/pkg/kube-aws/ec2-terminate-by-tag/environment/environment.go +++ b/pkg/kube-aws/ec2-terminate-by-tag/environment/environment.go @@ -2,6 +2,7 @@ package environment import ( "strconv" + "strings" clientTypes "k8s.io/apimachinery/pkg/types" @@ -9,16 +10,14 @@ import ( "github.com/litmuschaos/litmus-go/pkg/types" ) -//GetENV fetches all the env variables from the runner pod +// GetENV fetches all the env variables from the runner pod func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "ec2-terminate-by-tag") experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus") experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "") - experimentDetails.AuxiliaryAppInfo = types.Getenv("AUXILIARY_APPINFO", "") experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "30")) experimentDetails.ChaosInterval, _ = strconv.Atoi(types.Getenv("CHAOS_INTERVAL", "30")) experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) - experimentDetails.ChaosLib = types.Getenv("LIB", "litmus") experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", "")) experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "") experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "") @@ -26,8 +25,7 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.Timeout, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_TIMEOUT", "180")) experimentDetails.Region = types.Getenv("REGION", "") experimentDetails.ManagedNodegroup = types.Getenv("MANAGED_NODEGROUP", "disable") - experimentDetails.InstanceTag = types.Getenv("INSTANCE_TAG", "") + experimentDetails.InstanceTag = strings.TrimSpace(types.Getenv("INSTANCE_TAG", "")) experimentDetails.InstanceAffectedPerc, _ = strconv.Atoi(types.Getenv("INSTANCE_AFFECTED_PERC", "0")) experimentDetails.Sequence = types.Getenv("SEQUENCE", "parallel") - experimentDetails.TargetContainer = types.Getenv("TARGET_CONTAINER", "") } diff --git a/pkg/kube-aws/ec2-terminate-by-tag/types/types.go b/pkg/kube-aws/ec2-terminate-by-tag/types/types.go index 919cacf3f..972aa4b4e 100644 --- a/pkg/kube-aws/ec2-terminate-by-tag/types/types.go +++ b/pkg/kube-aws/ec2-terminate-by-tag/types/types.go @@ -9,8 +9,6 @@ type ExperimentDetails struct { ExperimentName string EngineName string RampTime int - AuxiliaryAppInfo string - ChaosLib string ChaosDuration int ChaosInterval int ChaosUID clientTypes.UID @@ -25,7 +23,5 @@ type ExperimentDetails struct { ManagedNodegroup string Sequence string ActiveNodes int - LIBImagePullPolicy string - TargetContainer string TargetInstanceIDList []string } diff --git a/pkg/probe/cmdprobe.go b/pkg/probe/cmdprobe.go index bb6d8b498..790df70d3 100644 --- a/pkg/probe/cmdprobe.go +++ b/pkg/probe/cmdprobe.go @@ -11,6 +11,7 @@ import ( "time" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" + "github.com/litmuschaos/litmus-go/pkg/cerrors" 
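
From here on, the kafka and probe helpers stop wrapping failures with errors.Errorf and return the structured cerrors.Error instead, carrying a machine-readable ErrorCode (for example ErrorTypeStatusChecks, ErrorTypeChaosRevert, or ErrorTypeCmdProbe), an optional Target, and a human-readable Reason; this is what the user-friendly failstep reporting is built on. A hedged sketch of a representative construction, assuming only the fields and constants that appear in these hunks (the wrapper function itself is hypothetical):

package probeutil // illustrative package name

import (
	"fmt"

	"github.com/litmuschaos/litmus-go/pkg/cerrors"
)

// checkedStatus shows how a plain error is promoted into the structured
// cerrors.Error that the failstep reporting consumes.
func checkedStatus(probeName string, statusErr error) error {
	if statusErr == nil {
		return nil
	}
	return cerrors.Error{
		ErrorCode: cerrors.ErrorTypeStatusChecks,                            // which failstep bucket this belongs to
		Target:    fmt.Sprintf("{name: %v}", probeName),                     // what the failure is about
		Reason:    fmt.Sprintf("status check failed, err: %v", statusErr),   // human-readable detail
	}
}
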
"github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/math" @@ -19,7 +20,7 @@ import ( "github.com/litmuschaos/litmus-go/pkg/types" litmusexec "github.com/litmuschaos/litmus-go/pkg/utils/exec" "github.com/litmuschaos/litmus-go/pkg/utils/retry" - "github.com/pkg/errors" + "github.com/palantir/stacktrace" "github.com/sirupsen/logrus" apiv1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -45,7 +46,7 @@ func prepareCmdProbe(probe v1alpha1.ProbeAttributes, clients clients.ClientSets, return err } default: - return fmt.Errorf("phase '%s' not supported in the cmd probe", phase) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeCmdProbe, Target: fmt.Sprintf("{name: %v}", probe.Name), Reason: fmt.Sprintf("phase '%s' not supported in the cmd probe", phase)} } return nil } @@ -74,11 +75,11 @@ func triggerInlineCmdProbe(probe v1alpha1.ProbeAttributes, resultDetails *types. cmd.Stdout = &out cmd.Stderr = &errOut if err := cmd.Run(); err != nil { - return errors.Errorf("unable to run command, err: %v; error output: %v", err, errOut.String()) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeCmdProbe, Target: fmt.Sprintf("{name: %v}", probe.Name), Reason: fmt.Sprintf("unable to run command: %s", errOut.String())} } rc := getAndIncrementRunCount(resultDetails, probe.Name) - if err = validateResult(probe.CmdProbeInputs.Comparator, strings.TrimSpace(out.String()), rc); err != nil { + if err = validateResult(probe.CmdProbeInputs.Comparator, probe.Name, strings.TrimSpace(out.String()), rc); err != nil { log.Errorf("the %v cmd probe has been Failed, err: %v", probe.Name, err) return err } @@ -112,11 +113,11 @@ func triggerSourceCmdProbe(probe v1alpha1.ProbeAttributes, execCommandDetails li // exec inside the external pod to get the o/p of given command output, err := litmusexec.Exec(&execCommandDetails, clients, command) if err != nil { - return errors.Errorf("Unable to get output of cmd command, err: %v", err) + return stacktrace.Propagate(err, "unable to get output of cmd command") } rc := getAndIncrementRunCount(resultDetails, probe.Name) - if err = validateResult(probe.CmdProbeInputs.Comparator, strings.TrimSpace(output), rc); err != nil { + if err = validateResult(probe.CmdProbeInputs.Comparator, probe.Name, strings.TrimSpace(output), rc); err != nil { log.Errorf("The %v cmd probe has been Failed, err: %v", probe.Name, err) return err } @@ -129,11 +130,11 @@ func triggerSourceCmdProbe(probe v1alpha1.ProbeAttributes, execCommandDetails li } // createProbePod creates an external pod with source image for the cmd probe -func createProbePod(clients clients.ClientSets, chaosDetails *types.ChaosDetails, runID string, source v1alpha1.SourceDetails) error { +func createProbePod(clients clients.ClientSets, chaosDetails *types.ChaosDetails, runID string, source v1alpha1.SourceDetails, probeName string) error { //deriving serviceAccount name for the probe pod - svcAccount, err := getServiceAccount(chaosDetails.ChaosNamespace, chaosDetails.ChaosPodName, clients) + svcAccount, err := getServiceAccount(chaosDetails.ChaosNamespace, chaosDetails.ChaosPodName, probeName, clients) if err != nil { - return errors.Errorf("unable to get the serviceAccountName, err: %v", err) + return stacktrace.Propagate(err, "unable to get the serviceAccountName") } expEnv, volume, expVolumeMount := inheritInputs(clients, chaosDetails.ChaosNamespace, chaosDetails.ChaosPodName, source) @@ -171,7 +172,11 @@ func createProbePod(clients 
clients.ClientSets, chaosDetails *types.ChaosDetails } _, err = clients.KubeClient.CoreV1().Pods(chaosDetails.ChaosNamespace).Create(context.Background(), cmdProbe, v1.CreateOptions{}) - return err + if err != nil { + return cerrors.Error{ErrorCode: cerrors.ErrorTypeCmdProbe, Target: fmt.Sprintf("{name: %v}", probeName), Reason: err.Error()} + } + + return nil } // inheritInputs will inherit the experiment details(ENVs and volumes) to the probe pod based on inheritInputs flag @@ -265,11 +270,11 @@ func getProbeCmd(sourceCmd []string) []string { return sourceCmd } -//deleteProbePod deletes the probe pod and wait until it got terminated -func deleteProbePod(chaosDetails *types.ChaosDetails, clients clients.ClientSets, runID string) error { +// deleteProbePod deletes the probe pod and wait until it got terminated +func deleteProbePod(chaosDetails *types.ChaosDetails, clients clients.ClientSets, runID, probeName string) error { if err := clients.KubeClient.CoreV1().Pods(chaosDetails.ChaosNamespace).Delete(context.Background(), chaosDetails.ExperimentName+"-probe-"+runID, v1.DeleteOptions{}); err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeCmdProbe, Target: fmt.Sprintf("{name: %v}", probeName), Reason: err.Error()} } // waiting till the termination of the pod @@ -279,9 +284,9 @@ func deleteProbePod(chaosDetails *types.ChaosDetails, clients clients.ClientSets Try(func(attempt uint) error { podSpec, err := clients.KubeClient.CoreV1().Pods(chaosDetails.ChaosNamespace).List(context.Background(), v1.ListOptions{LabelSelector: chaosDetails.ExperimentName + "-probe-" + runID}) if err != nil { - return errors.Errorf("Probe Pod is not deleted yet, err: %v", err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeCmdProbe, Target: fmt.Sprintf("{name: %v}", probeName), Reason: fmt.Sprintf("failed to list probe pod: %s", err.Error())} } else if len(podSpec.Items) != 0 { - return errors.Errorf("Probe Pod is not deleted yet") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeCmdProbe, Target: fmt.Sprintf("{name: %v}", probeName), Reason: "probe pod is not deleted within timeout"} } return nil }) @@ -314,6 +319,7 @@ loop: err = triggerInlineCmdProbe(probe, chaosresult) // record the error inside the probeDetails, we are maintaining a dedicated variable for the err, inside probeDetails if err != nil { + err = addProbePhase(err, string(chaosDetails.Phase)) for index := range chaosresult.ProbeDetails { if chaosresult.ProbeDetails[index].Name == probe.Name { chaosresult.ProbeDetails[index].IsProbeFailedWithError = err @@ -363,6 +369,7 @@ loop: default: // record the error inside the probeDetails, we are maintaining a dedicated variable for the err, inside probeDetails if err = triggerInlineCmdProbe(probe, chaosresult); err != nil { + err = addProbePhase(err, string(chaosDetails.Phase)) for index := range chaosresult.ProbeDetails { if chaosresult.ProbeDetails[index].Name == probe.Name { chaosresult.ProbeDetails[index].IsProbeFailedWithError = err @@ -412,6 +419,7 @@ loop: default: // record the error inside the probeDetails, we are maintaining a dedicated variable for the err, inside probeDetails if err = triggerSourceCmdProbe(probe, execCommandDetails, clients, chaosresult); err != nil { + err = addProbePhase(err, string(chaosDetails.Phase)) for index := range chaosresult.ProbeDetails { if chaosresult.ProbeDetails[index].Name == probe.Name { chaosresult.ProbeDetails[index].IsProbeFailedWithError = err @@ -453,6 +461,7 @@ loop: err = triggerSourceCmdProbe(probe, execCommandDetails, 
clients, chaosresult) // record the error inside the probeDetails, we are maintaining a dedicated variable for the err, inside probeDetails if err != nil { + err = addProbePhase(err, string(chaosDetails.Phase)) for index := range chaosresult.ProbeDetails { if chaosresult.ProbeDetails[index].Name == probe.Name { chaosresult.ProbeDetails[index].IsProbeFailedWithError = err @@ -477,33 +486,34 @@ loop: // validateResult validate the probe result to specified comparison operation // it supports int, float, string operands -func validateResult(comparator v1alpha1.ComparatorInfo, cmdOutput string, rc int) error { +func validateResult(comparator v1alpha1.ComparatorInfo, probeName, cmdOutput string, rc int) error { compare := cmp.RunCount(rc). FirstValue(cmdOutput). SecondValue(comparator.Value). - Criteria(comparator.Criteria) + Criteria(comparator.Criteria). + ProbeName(probeName) switch strings.ToLower(comparator.Type) { case "int": - if err = compare.CompareInt(); err != nil { + if err = compare.CompareInt(cerrors.ErrorTypeCmdProbe); err != nil { return err } case "float": - if err = compare.CompareFloat(); err != nil { + if err = compare.CompareFloat(cerrors.ErrorTypeCmdProbe); err != nil { return err } case "string": - if err = compare.CompareString(); err != nil { + if err = compare.CompareString(cerrors.ErrorTypeCmdProbe); err != nil { return err } default: - return fmt.Errorf("comparator type '%s' not supported in the cmd probe", comparator.Type) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{name: %v}", probeName), Reason: fmt.Sprintf("comparator type '%s' not supported in the cmd probe", comparator.Type)} } return nil } -//preChaosCmdProbe trigger the cmd probe for prechaos phase +// preChaosCmdProbe trigger the cmd probe for prechaos phase func preChaosCmdProbe(probe v1alpha1.ProbeAttributes, resultDetails *types.ResultDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails) error { switch probe.Mode { @@ -555,7 +565,7 @@ func preChaosCmdProbe(probe v1alpha1.ProbeAttributes, resultDetails *types.Resul runID := getRunIDFromProbe(resultDetails, probe.Name, probe.Type) // deleting the external pod which was created for cmd probe - if err = deleteProbePod(chaosDetails, clients, runID); err != nil { + if err = deleteProbePod(chaosDetails, clients, runID, probe.Name); err != nil { return err } } @@ -589,7 +599,7 @@ func preChaosCmdProbe(probe v1alpha1.ProbeAttributes, resultDetails *types.Resul return nil } -//postChaosCmdProbe trigger cmd probe for post chaos phase +// postChaosCmdProbe trigger cmd probe for post chaos phase func postChaosCmdProbe(probe v1alpha1.ProbeAttributes, resultDetails *types.ResultDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails) error { switch probe.Mode { @@ -641,7 +651,7 @@ func postChaosCmdProbe(probe v1alpha1.ProbeAttributes, resultDetails *types.Resu runID := getRunIDFromProbe(resultDetails, probe.Name, probe.Type) // deleting the external pod which was created for cmd probe - if err = deleteProbePod(chaosDetails, clients, runID); err != nil { + if err = deleteProbePod(chaosDetails, clients, runID, probe.Name); err != nil { return err } } @@ -664,7 +674,7 @@ func postChaosCmdProbe(probe v1alpha1.ProbeAttributes, resultDetails *types.Resu // get runId runID := getRunIDFromProbe(resultDetails, probe.Name, probe.Type) // deleting the external pod, which was created for cmd probe - if err = deleteProbePod(chaosDetails, clients, runID); err != nil { + if err = deleteProbePod(chaosDetails, clients, 
runID, probe.Name); err != nil { return err } @@ -673,7 +683,7 @@ func postChaosCmdProbe(probe v1alpha1.ProbeAttributes, resultDetails *types.Resu return nil } -//onChaosCmdProbe trigger the cmd probe for DuringChaos phase +// onChaosCmdProbe trigger the cmd probe for DuringChaos phase func onChaosCmdProbe(probe v1alpha1.ProbeAttributes, resultDetails *types.ResultDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails) error { switch probe.Mode { @@ -713,14 +723,14 @@ func createHelperPod(probe v1alpha1.ProbeAttributes, resultDetails *types.Result setRunIDForProbe(resultDetails, probe.Name, probe.Type, runID) // create the external pod with source image for cmd probe - if err := createProbePod(clients, chaosDetails, runID, probe.CmdProbeInputs.Source); err != nil { + if err := createProbePod(clients, chaosDetails, runID, probe.CmdProbeInputs.Source, probe.Name); err != nil { return litmusexec.PodDetails{}, err } // verify the running status of external probe pod log.Info("[Status]: Checking the status of the probe pod") if err = status.CheckApplicationStatusesByLabels(chaosDetails.ChaosNamespace, "name="+chaosDetails.ExperimentName+"-probe-"+runID, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return litmusexec.PodDetails{}, errors.Errorf("probe pod is not in running state, err: %v", err) + return litmusexec.PodDetails{}, stacktrace.Propagate(err, "probe pod is not in running state") } // setting the attributes for the exec command @@ -731,10 +741,10 @@ func createHelperPod(probe v1alpha1.ProbeAttributes, resultDetails *types.Result } // getServiceAccount derive the serviceAccountName for the probe pod -func getServiceAccount(chaosNamespace, chaosPodName string, clients clients.ClientSets) (string, error) { +func getServiceAccount(chaosNamespace, chaosPodName, probeName string, clients clients.ClientSets) (string, error) { pod, err := clients.KubeClient.CoreV1().Pods(chaosNamespace).Get(context.Background(), chaosPodName, v1.GetOptions{}) if err != nil { - return "", err + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeCmdProbe, Target: fmt.Sprintf("{name: %v}", probeName), Reason: err.Error()} } return pod.Spec.ServiceAccountName, nil } diff --git a/pkg/probe/comparator/comparator.go b/pkg/probe/comparator/comparator.go index f0371862d..f87563c01 100644 --- a/pkg/probe/comparator/comparator.go +++ b/pkg/probe/comparator/comparator.go @@ -1,46 +1,53 @@ package comparator -//Model contains operands and operator for the comparison operations +// Model contains operands and operator for the comparison operations // a and b attribute belongs to operands and operator attribute belongs to operator type Model struct { - a interface{} - b interface{} - operator string - rc int + a interface{} + b interface{} + operator string + rc int + probeName string } -//RunCount sets the run counts +// RunCount sets the run counts func RunCount(rc int) *Model { model := Model{} return model.RunCount(rc) } -//RunCount sets the run counts +// RunCount sets the run counts func (model *Model) RunCount(rc int) *Model { model.rc = rc return model } -//FirstValue sets the first operands +// FirstValue sets the first operands func FirstValue(a interface{}) *Model { model := Model{} return model.FirstValue(a) } -//FirstValue sets the first operands +// FirstValue sets the first operands func (model *Model) FirstValue(a interface{}) *Model { model.a = a return model } -//SecondValue sets the second operand +// SecondValue sets the second operand func (model *Model) 
SecondValue(b interface{}) *Model { model.b = b return model } -//Criteria sets the criteria/operator +// Criteria sets the criteria/operator func (model *Model) Criteria(criteria string) *Model { model.operator = criteria return model } + +// ProbeName sets the name of the probe under evaluation +func (model *Model) ProbeName(probeName string) *Model { + model.probeName = probeName + return model +} diff --git a/pkg/probe/comparator/float.go b/pkg/probe/comparator/float.go index a9c78aed0..d8f359d95 100644 --- a/pkg/probe/comparator/float.go +++ b/pkg/probe/comparator/float.go @@ -1,17 +1,18 @@ package comparator import ( + "fmt" "reflect" "strconv" "strings" + "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/log" - "github.com/pkg/errors" ) // CompareFloat compares floating numbers for specific operation // it check for the >=, >, <=, <, ==, != operators -func (model Model) CompareFloat() error { +func (model Model) CompareFloat(errorCode cerrors.ErrorType) error { obj := Float{} obj.setValues(reflect.ValueOf(model.a).String(), reflect.ValueOf(model.b).String()) @@ -23,41 +24,41 @@ func (model Model) CompareFloat() error { switch model.operator { case ">=": if !obj.isGreaterorEqual() { - return errors.Errorf("{actual value: %v} is not greater than or equal to {expected value: %v}", obj.a, obj.b) + return cerrors.Error{ErrorCode: errorCode, Target: model.probeName, Reason: fmt.Sprintf("actual value: %v is not greater than or equal to expected value: %v", obj.a, obj.b)} } case "<=": if !obj.isLesserorEqual() { - return errors.Errorf("{actual value: %v} is not lesser than or equal to {expected value: %v}", obj.a, obj.b) + return cerrors.Error{ErrorCode: errorCode, Target: model.probeName, Reason: fmt.Sprintf("actual value: %v is not lesser than or equal to expected value: %v", obj.a, obj.b)} } case ">": if !obj.isGreater() { - return errors.Errorf("{actual value: %v} is not greater than {expected value: %v}", obj.a, obj.b) + return cerrors.Error{ErrorCode: errorCode, Target: model.probeName, Reason: fmt.Sprintf("actual value: %v is not greater than expected value: %v", obj.a, obj.b)} } case "<": if !obj.isLesser() { - return errors.Errorf("{actual value: %v} is not lesser than {expected value: %v}", obj.a, obj.b) + return cerrors.Error{ErrorCode: errorCode, Target: model.probeName, Reason: fmt.Sprintf("actual value: %v is not lesser than expected value: %v", obj.a, obj.b)} } case "==": if !obj.isEqual() { - return errors.Errorf("{actual value: %v} is not equal to {expected value: %v}", obj.a, obj.b) + return cerrors.Error{ErrorCode: errorCode, Target: model.probeName, Reason: fmt.Sprintf("actual value: %v is not equal to expected value: %v", obj.a, obj.b)} } case "!=": if !obj.isNotEqual() { - return errors.Errorf("{actual value: %v} is not Notequal to {expected value: %v}", obj.a, obj.b) + return cerrors.Error{ErrorCode: errorCode, Target: model.probeName, Reason: fmt.Sprintf("actual value: %v is not Notequal to expected value: %v", obj.a, obj.b)} } case "OneOf", "oneOf": if !obj.isOneOf() { - return errors.Errorf("Actual value: {%v} doesn't matched with any of the expected values: {%v}", obj.a, obj.c) + return cerrors.Error{ErrorCode: errorCode, Target: model.probeName, Reason: fmt.Sprintf("actual value: %v doesn't matched with any of the expected values: %v", obj.a, obj.c)} } case "between", "Between": if len(obj.c) < 2 { - return errors.Errorf("{expected value: %v} should contains both lower and upper limits", obj.c) + return cerrors.Error{ErrorCode: 
errorCode, Target: model.probeName, Reason: fmt.Sprintf("expected value: %v should contains both lower and upper limits", obj.c)} } if !obj.isBetween() { - return errors.Errorf("Actual value: {%v} doesn't lie in between expected range: {%v}", obj.a, obj.c) + return cerrors.Error{ErrorCode: errorCode, Target: model.probeName, Reason: fmt.Sprintf("actual value: %v doesn't lie in between expected range: %v", obj.a, obj.c)} } default: - return errors.Errorf("criteria '%s' not supported in the probe", model.operator) + return cerrors.Error{ErrorCode: errorCode, Target: model.probeName, Reason: fmt.Sprintf("criteria '%s' not supported in the probe", model.operator)} } return nil } diff --git a/pkg/probe/comparator/integer.go b/pkg/probe/comparator/integer.go index e2910671a..fbe48386e 100644 --- a/pkg/probe/comparator/integer.go +++ b/pkg/probe/comparator/integer.go @@ -1,17 +1,18 @@ package comparator import ( + "fmt" "reflect" "strconv" "strings" + "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/log" - "github.com/pkg/errors" ) // CompareInt compares integer numbers for specific operation // it check for the >=, >, <=, <, ==, != operators -func (model Model) CompareInt() error { +func (model Model) CompareInt(errorCode cerrors.ErrorType) error { obj := Integer{} obj.setValues(reflect.ValueOf(model.a).String(), reflect.ValueOf(model.b).String()) @@ -23,41 +24,41 @@ func (model Model) CompareInt() error { switch model.operator { case ">=": if !obj.isGreaterorEqual() { - return errors.Errorf("{actual value: %v} is not greater than or equal to {expected value: %v}", obj.a, obj.b) + return cerrors.Error{ErrorCode: errorCode, Target: model.probeName, Reason: fmt.Sprintf("actual value: %v is not greater than or equal to expected value: %v", obj.a, obj.b)} } case "<=": if !obj.isLesserorEqual() { - return errors.Errorf("{actual value: %v} is not lesser than or equal to {expected value: %v}", obj.a, obj.b) + return cerrors.Error{ErrorCode: errorCode, Target: model.probeName, Reason: fmt.Sprintf("actual value: %v is not lesser than or equal to expected value: %v", obj.a, obj.b)} } case ">": if !obj.isGreater() { - return errors.Errorf("{actual value: %v} is not greater than {expected value: %v}", obj.a, obj.b) + return cerrors.Error{ErrorCode: errorCode, Target: model.probeName, Reason: fmt.Sprintf("actual value: %v is not greater than expected value: %v", obj.a, obj.b)} } case "<": if !obj.isLesser() { - return errors.Errorf("{actual value: %v} is not lesser than {expected value: %v}", obj.a, obj.b) + return cerrors.Error{ErrorCode: errorCode, Target: model.probeName, Reason: fmt.Sprintf("actual value: %v is not lesser than expected value: %v", obj.a, obj.b)} } case "==": if !obj.isEqual() { - return errors.Errorf("{actual value: %v} is not equal to {expected value: %v}", obj.a, obj.b) + return cerrors.Error{ErrorCode: errorCode, Target: model.probeName, Reason: fmt.Sprintf("actual value: %v is not equal to expected value: %v", obj.a, obj.b)} } case "!=": if !obj.isNotEqual() { - return errors.Errorf("{actual value: %v} is not NotEqual to {expected value: %v}", obj.a, obj.b) + return cerrors.Error{ErrorCode: errorCode, Target: model.probeName, Reason: fmt.Sprintf("actual value: %v is not NotEqual to expected value: %v", obj.a, obj.b)} } case "OneOf", "oneOf": if !obj.isOneOf() { - return errors.Errorf("Actual value: {%v} doesn't matched with any of the expected values: {%v}", obj.a, obj.c) + return cerrors.Error{ErrorCode: errorCode, Target: model.probeName, Reason: 
fmt.Sprintf("actual value: %v doesn't matched with any of the expected values: %v", obj.a, obj.c)} } case "between", "Between": if len(obj.c) < 2 { - return errors.Errorf("{expected value: %v} should contains both lower and upper limits", obj.c) + return cerrors.Error{ErrorCode: errorCode, Target: model.probeName, Reason: fmt.Sprintf("expected value: %v should contains both lower and upper limits", obj.c)} } if !obj.isBetween() { - return errors.Errorf("Actual value: {%v} doesn't lie in between expected range: {%v}", obj.a, obj.c) + return cerrors.Error{ErrorCode: errorCode, Target: model.probeName, Reason: fmt.Sprintf("actual value: %v doesn't lie in between expected range: %v", obj.a, obj.c)} } default: - return errors.Errorf("criteria '%s' not supported in the probe", model.operator) + return cerrors.Error{ErrorCode: errorCode, Target: model.probeName, Reason: fmt.Sprintf("criteria '%s' not supported in the probe", model.operator)} } return nil } diff --git a/pkg/probe/comparator/string.go b/pkg/probe/comparator/string.go index d6654f2cf..c75ef682f 100644 --- a/pkg/probe/comparator/string.go +++ b/pkg/probe/comparator/string.go @@ -1,17 +1,18 @@ package comparator import ( + "fmt" "reflect" "regexp" "strings" + "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/log" - "github.com/pkg/errors" ) // CompareString compares strings for specific operation // it check for the equal, not equal and contains(sub-string) operations -func (model Model) CompareString() error { +func (model Model) CompareString(errorCode cerrors.ErrorType) error { obj := String{} obj.setValues(reflect.ValueOf(model.a).String(), reflect.ValueOf(model.b).String()) @@ -23,38 +24,38 @@ func (model Model) CompareString() error { switch model.operator { case "equal", "Equal": if !obj.isEqual() { - return errors.Errorf("{actual value: %v} is not equal to {expected value: %v}", obj.a, obj.b) + return cerrors.Error{ErrorCode: errorCode, Target: model.probeName, Reason: fmt.Sprintf("actual value: %v is not equal to expected value: %v", obj.a, obj.b)} } case "notEqual", "NotEqual": if !obj.isNotEqual() { - return errors.Errorf("{actual value: %v} is not Notequal to {expected value: %v}", obj.a, obj.b) + return cerrors.Error{ErrorCode: errorCode, Target: model.probeName, Reason: fmt.Sprintf("actual value: %v is not Notequal to expected value: %v", obj.a, obj.b)} } case "contains", "Contains": if !obj.isContains() { - return errors.Errorf("{actual value: %v} doesn't contains {expected value: %v}", obj.a, obj.b) + return cerrors.Error{ErrorCode: errorCode, Target: model.probeName, Reason: fmt.Sprintf("actual value: %v doesn't contains expected value: %v", obj.a, obj.b)} } case "matches", "Matches": re, err := regexp.Compile(obj.b) if err != nil { - return errors.Errorf("the probe regex '%s' is not a valid expression", obj.b) + return cerrors.Error{ErrorCode: errorCode, Target: model.probeName, Reason: fmt.Sprintf("the probe regex '%s' is not a valid expression", obj.b)} } if !obj.isMatched(re) { - return errors.Errorf("{actual value: %v} is not matched with {expected regex: %v}", obj.a, obj.b) + return cerrors.Error{ErrorCode: errorCode, Target: model.probeName, Reason: fmt.Sprintf("actual value: %v is not matched with expected regex: %v", obj.a, obj.b)} } case "notMatches", "NotMatches": re, err := regexp.Compile(obj.b) if err != nil { - return errors.Errorf("the probe regex '%s' is not a valid expression", obj.b) + return cerrors.Error{ErrorCode: errorCode, Target: model.probeName, Reason: 
fmt.Sprintf("the probe regex '%s' is not a valid expression", obj.b)} } if obj.isMatched(re) { - return errors.Errorf("{actual value: %v} is not NotMatched with {expected regex: %v}", obj.a, obj.b) + return cerrors.Error{ErrorCode: errorCode, Target: model.probeName, Reason: fmt.Sprintf("actual value: %v is not NotMatched with expected regex: %v", obj.a, obj.b)} } case "oneOf", "OneOf": if !obj.isOneOf() { - return errors.Errorf("Actual value: {%v} doesn't matched with any of the expected values: {%v}", obj.a, obj.c) + return cerrors.Error{ErrorCode: errorCode, Target: model.probeName, Reason: fmt.Sprintf("actual value: %v doesn't matched with any of the expected values: %v", obj.a, obj.c)} } default: - return errors.Errorf("criteria '%s' not supported in the probe", model.operator) + return cerrors.Error{ErrorCode: errorCode, Target: model.probeName, Reason: fmt.Sprintf("criteria '%s' not supported in the probe", model.operator)} } return nil } diff --git a/pkg/probe/httpprobe.go b/pkg/probe/httpprobe.go index 3b4ec3bfa..4bf5dde9d 100644 --- a/pkg/probe/httpprobe.go +++ b/pkg/probe/httpprobe.go @@ -13,13 +13,13 @@ import ( "net/http" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" + "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/math" cmp "github.com/litmuschaos/litmus-go/pkg/probe/comparator" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/retry" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -39,7 +39,7 @@ func prepareHTTPProbe(probe v1alpha1.ProbeAttributes, clients clients.ClientSets case "duringchaos": onChaosHTTPProbe(probe, resultDetails, clients, chaosDetails) default: - return errors.Errorf("phase '%s' not supported in the http probe", phase) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeHttpProbe, Target: fmt.Sprintf("{name: %v}", probe.Name), Reason: fmt.Sprintf("phase '%s' not supported in the http probe", phase)} } return nil } @@ -116,7 +116,7 @@ func httpGet(probe v1alpha1.ProbeAttributes, client *http.Client, resultDetails // getting the response from the given url resp, err := client.Get(probe.HTTPProbeInputs.URL) if err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeHttpProbe, Target: fmt.Sprintf("{name: %v}", probe.Name), Reason: err.Error()} } code := strconv.Itoa(resp.StatusCode) @@ -127,7 +127,8 @@ func httpGet(probe v1alpha1.ProbeAttributes, client *http.Client, resultDetails FirstValue(code). SecondValue(probe.HTTPProbeInputs.Method.Get.ResponseCode). Criteria(probe.HTTPProbeInputs.Method.Get.Criteria). - CompareInt(); err != nil { + ProbeName(probe.Name). 
+ CompareInt(cerrors.ErrorTypeHttpProbe); err != nil { log.Errorf("The %v http probe get method has Failed, err: %v", probe.Name, err) return err } @@ -137,7 +138,7 @@ func httpGet(probe v1alpha1.ProbeAttributes, client *http.Client, resultDetails // httpPost send the http post request to the given URL func httpPost(probe v1alpha1.ProbeAttributes, client *http.Client, resultDetails *types.ResultDetails) error { - body, err := getHTTPBody(probe.HTTPProbeInputs.Method.Post) + body, err := getHTTPBody(probe.HTTPProbeInputs.Method.Post, probe.Name) if err != nil { return err } @@ -149,7 +150,7 @@ func httpPost(probe v1alpha1.ProbeAttributes, client *http.Client, resultDetails Try(func(attempt uint) error { resp, err := client.Post(probe.HTTPProbeInputs.URL, probe.HTTPProbeInputs.Method.Post.ContentType, strings.NewReader(body)) if err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeHttpProbe, Target: fmt.Sprintf("{name: %v}", probe.Name), Reason: err.Error()} } code := strconv.Itoa(resp.StatusCode) rc := getAndIncrementRunCount(resultDetails, probe.Name) @@ -159,7 +160,8 @@ func httpPost(probe v1alpha1.ProbeAttributes, client *http.Client, resultDetails FirstValue(code). SecondValue(probe.HTTPProbeInputs.Method.Post.ResponseCode). Criteria(probe.HTTPProbeInputs.Method.Post.Criteria). - CompareInt(); err != nil { + ProbeName(probe.Name). + CompareInt(cerrors.ErrorTypeHttpProbe); err != nil { log.Errorf("The %v http probe post method has Failed, err: %v", probe.Name, err) return err } @@ -170,7 +172,7 @@ func httpPost(probe v1alpha1.ProbeAttributes, client *http.Client, resultDetails // getHTTPBody fetch the http body for the post request // It will use body or bodyPath attributes to get the http request body // if both are provided, it will use body field -func getHTTPBody(httpBody v1alpha1.PostMethod) (string, error) { +func getHTTPBody(httpBody v1alpha1.PostMethod, probeName string) (string, error) { if httpBody.Body != "" { return httpBody.Body, nil @@ -181,7 +183,7 @@ func getHTTPBody(httpBody v1alpha1.PostMethod) (string, error) { if httpBody.BodyPath != "" { command = "cat " + httpBody.BodyPath } else { - return "", errors.Errorf("[Probe]: Any one of body or bodyPath is required") + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeHttpProbe, Target: fmt.Sprintf("{name: %v}", probeName), Reason: "[Probe]: Any one of body or bodyPath is required"} } var out, errOut bytes.Buffer @@ -190,7 +192,7 @@ func getHTTPBody(httpBody v1alpha1.PostMethod) (string, error) { cmd.Stdout = &out cmd.Stderr = &errOut if err := cmd.Run(); err != nil { - return "", fmt.Errorf("unable to run command, err: %v; error output: %v", err, errOut.String()) + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeHttpProbe, Target: fmt.Sprintf("{name: %v}", probeName), Reason: fmt.Sprintf("unable to run command, err: %v; error output: %v", err, errOut.String())} } return out.String(), nil } @@ -211,6 +213,7 @@ loop: err = triggerHTTPProbe(probe, chaosresult) // record the error inside the probeDetails, we are maintaining a dedicated variable for the err, inside probeDetails if err != nil { + err = addProbePhase(err, string(chaosDetails.Phase)) for index := range chaosresult.ProbeDetails { if chaosresult.ProbeDetails[index].Name == probe.Name { chaosresult.ProbeDetails[index].IsProbeFailedWithError = err @@ -228,12 +231,12 @@ loop: // and failed the experiment in the end if isExperimentFailed && probe.RunProperties.StopOnFailure { if err := stopChaosEngine(probe, clients, chaosresult, 
chaosDetails); err != nil { - log.Errorf("unable to patch chaosengine to stop, err: %v", err) + log.Errorf("Unable to patch chaosengine to stop, err: %v", err) } } } -//preChaosHTTPProbe trigger the http probe for prechaos phase +// preChaosHTTPProbe trigger the http probe for prechaos phase func preChaosHTTPProbe(probe v1alpha1.ProbeAttributes, resultDetails *types.ResultDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails) error { switch probe.Mode { @@ -276,7 +279,7 @@ func preChaosHTTPProbe(probe v1alpha1.ProbeAttributes, resultDetails *types.Resu return nil } -//postChaosHTTPProbe trigger the http probe for postchaos phase +// postChaosHTTPProbe trigger the http probe for postchaos phase func postChaosHTTPProbe(probe v1alpha1.ProbeAttributes, resultDetails *types.ResultDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails) error { switch probe.Mode { @@ -343,6 +346,7 @@ loop: err = triggerHTTPProbe(probe, chaosresult) // record the error inside the probeDetails, we are maintaining a dedicated variable for the err, inside probeDetails if err != nil { + err = addProbePhase(err, string(chaosDetails.Phase)) for index := range chaosresult.ProbeDetails { if chaosresult.ProbeDetails[index].Name == probe.Name { chaosresult.ProbeDetails[index].IsProbeFailedWithError = err @@ -366,7 +370,7 @@ loop: } } -//onChaosHTTPProbe trigger the http probe for DuringChaos phase +// onChaosHTTPProbe trigger the http probe for DuringChaos phase func onChaosHTTPProbe(probe v1alpha1.ProbeAttributes, resultDetails *types.ResultDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails) { switch probe.Mode { diff --git a/pkg/probe/k8sprobe.go b/pkg/probe/k8sprobe.go index dcbca55b0..76be5ba87 100644 --- a/pkg/probe/k8sprobe.go +++ b/pkg/probe/k8sprobe.go @@ -2,17 +2,19 @@ package probe import ( "context" - k8serrors "k8s.io/apimachinery/pkg/api/errors" + "fmt" "strings" "time" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" + "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/math" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/retry" - "github.com/pkg/errors" "github.com/sirupsen/logrus" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -35,7 +37,7 @@ func prepareK8sProbe(probe v1alpha1.ProbeAttributes, resultDetails *types.Result case "duringchaos": onChaosK8sProbe(probe, resultDetails, clients, chaosDetails) default: - return errors.Errorf("phase '%s' not supported in the k8s probe", phase) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeK8sProbe, Target: fmt.Sprintf("{name: %v}", probe.Name), Reason: fmt.Sprintf("phase '%s' not supported in the k8s probe", phase)} } return nil } @@ -106,7 +108,7 @@ func triggerK8sProbe(probe v1alpha1.ProbeAttributes, clients clients.ClientSets, return err } default: - return errors.Errorf("operation type '%s' not supported in the k8s probe", inputs.Operation) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeK8sProbe, Target: fmt.Sprintf("{name: %v}", probe.Name), Reason: fmt.Sprintf("operation type '%s' not supported in the k8s probe", inputs.Operation)} } return nil @@ -122,13 +124,14 @@ func triggerContinuousK8sProbe(probe v1alpha1.ProbeAttributes, clients clients.C time.Sleep(time.Duration(probe.RunProperties.InitialDelaySeconds) * time.Second) } - 
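The http and k8s probe conversions in this patch all build the same error shape by hand: a typed code, a "{name: <probe>}" target, and the underlying reason. As a purely illustrative sketch (this helper is hypothetical and not part of the patch), the pattern they follow is:

package probeexample

import (
	"fmt"

	"github.com/litmuschaos/litmus-go/pkg/cerrors"
)

// wrapProbeError is a hypothetical helper showing the probe error shape used
// throughout the probe files touched by this patch.
func wrapProbeError(code cerrors.ErrorType, probeName string, err error) error {
	if err == nil {
		return nil
	}
	return cerrors.Error{
		ErrorCode: code,
		Target:    fmt.Sprintf("{name: %v}", probeName),
		Reason:    err.Error(),
	}
}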
// it trigger the k8s probe for the entire duration of chaos and it fails, if any error encounter + // it triggers the k8s probe for the entire duration of chaos and it fails, if any error encounter // marked the error for the probes, if any loop: for { err = triggerK8sProbe(probe, clients, chaosresult) // record the error inside the probeDetails, we are maintaining a dedicated variable for the err, inside probeDetails if err != nil { + err = addProbePhase(err, string(chaosDetails.Phase)) for index := range chaosresult.ProbeDetails { if chaosresult.ProbeDetails[index].Name == probe.Name { chaosresult.ProbeDetails[index].IsProbeFailedWithError = err @@ -158,7 +161,7 @@ func createResource(probe v1alpha1.ProbeAttributes, gvr schema.GroupVersionResou data := &unstructured.Unstructured{} _, _, err = decUnstructured.Decode([]byte(probe.Data), nil, data) if err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeK8sProbe, Target: fmt.Sprintf("{name: %v}", probe.Name), Reason: err.Error()} } _, err := clients.DynamicClient.Resource(gvr).Namespace(probe.K8sProbeInputs.Namespace).Create(context.Background(), data, v1.CreateOptions{}) @@ -176,7 +179,7 @@ func deleteResource(probe v1alpha1.ProbeAttributes, gvr schema.GroupVersionResou // delete resources for _, res := range parsedResourceNames { if err = clients.DynamicClient.Resource(gvr).Namespace(probe.K8sProbeInputs.Namespace).Delete(context.Background(), res, v1.DeleteOptions{}); err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeK8sProbe, Target: fmt.Sprintf("{name: %v}", probe.Name), Reason: err.Error()} } } } else { @@ -185,14 +188,14 @@ func deleteResource(probe v1alpha1.ProbeAttributes, gvr schema.GroupVersionResou LabelSelector: probe.K8sProbeInputs.LabelSelector, }) if err != nil { - return errors.Errorf("unable to list the resources with matching selector, err: %v", err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeK8sProbe, Target: fmt.Sprintf("{name: %v}", probe.Name), Reason: fmt.Sprintf("unable to list the resources with matching selector, err: %v", err)} } else if len(resourceList.Items) == 0 { - return errors.Errorf("no resource found with provided selectors") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeK8sProbe, Target: fmt.Sprintf("{name: %v}", probe.Name), Reason: "no resource found with provided selectors"} } for index := range resourceList.Items { if err = clients.DynamicClient.Resource(gvr).Namespace(probe.K8sProbeInputs.Namespace).Delete(context.Background(), resourceList.Items[index].GetName(), v1.DeleteOptions{}); err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeK8sProbe, Target: fmt.Sprintf("{name: %v}", probe.Name), Reason: err.Error()} } } } @@ -213,9 +216,9 @@ func resourcesPresent(probe v1alpha1.ProbeAttributes, gvr schema.GroupVersionRes }) if err != nil { log.Errorf("the %v k8s probe has Failed, err: %v", probe.Name, err) - return errors.Errorf("unable to list the resources with matching selector, err: %v", err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeK8sProbe, Target: fmt.Sprintf("{name: %v}", probe.Name), Reason: fmt.Sprintf("unable to list the resources with matching selector, err: %v", err)} } else if len(resourceList.Items) == 0 { - return errors.Errorf("no resource found with provided selectors") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeK8sProbe, Target: fmt.Sprintf("{name: %v}", probe.Name), Reason: "no resource found with provided selectors"} } } return nil @@ -225,9 +228,9 @@ func 
areResourcesWithNamePresent(probe v1alpha1.ProbeAttributes, gvr schema.Grou for _, res := range parsedResourceNames { resource, err := clients.DynamicClient.Resource(gvr).Namespace(probe.K8sProbeInputs.Namespace).Get(context.Background(), res, v1.GetOptions{}) if err != nil { - return errors.Errorf("unable to get the resources with name %v, err: %v", res, err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeK8sProbe, Target: fmt.Sprintf("{name: %v}", probe.Name), Reason: fmt.Sprintf("unable to get the resources with name %v, err: %v", res, err)} } else if resource == nil { - return errors.Errorf("unable to get the resources with name %v", res) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeK8sProbe, Target: fmt.Sprintf("{name: %v}", probe.Name), Reason: fmt.Sprintf("unable to get the resources with name %v", res)} } } return nil @@ -242,10 +245,10 @@ func resourcesAbsent(probe v1alpha1.ProbeAttributes, gvr schema.GroupVersionReso if err != nil { // ignore not found error, that is the expected outcome if !k8serrors.IsNotFound(err) { - return errors.Errorf("unable to get the resources with name %v from k8s, err: %v", res, err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeK8sProbe, Target: fmt.Sprintf("{name: %v}", probe.Name), Reason: fmt.Sprintf("unable to get the resources with name %v from k8s, err: %v", res, err)} } } else if resource != nil { - return errors.Errorf("resource '%v' still exists but is expected to be absent", res) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeK8sProbe, Target: fmt.Sprintf("{name: %v}", probe.Name), Reason: fmt.Sprintf("resource '%v' still exists but is expected to be absent", res)} } } } else { @@ -254,16 +257,16 @@ func resourcesAbsent(probe v1alpha1.ProbeAttributes, gvr schema.GroupVersionReso LabelSelector: probe.K8sProbeInputs.LabelSelector, }) if err != nil { - return errors.Errorf("unable to list the resources with matching selector, err: %v", err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeK8sProbe, Target: fmt.Sprintf("{name: %v}", probe.Name), Reason: fmt.Sprintf("unable to list the resources with matching selector, err: %v", err)} } if len(resourceList.Items) != 0 { - return errors.Errorf("resource with provided selectors still exists, found %v resources with matching selectors", len(resourceList.Items)) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeK8sProbe, Target: fmt.Sprintf("{name: %v}", probe.Name), Reason: fmt.Sprintf("resource with provided selectors still exists, found %v resources with matching selectors", len(resourceList.Items))} } } return nil } -//preChaosK8sProbe trigger the k8s probe for prechaos phase +// preChaosK8sProbe trigger the k8s probe for prechaos phase func preChaosK8sProbe(probe v1alpha1.ProbeAttributes, resultDetails *types.ResultDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails) error { switch strings.ToLower(probe.Mode) { @@ -305,7 +308,7 @@ func preChaosK8sProbe(probe v1alpha1.ProbeAttributes, resultDetails *types.Resul return nil } -//postChaosK8sProbe trigger the k8s probe for postchaos phase +// postChaosK8sProbe trigger the k8s probe for postchaos phase func postChaosK8sProbe(probe v1alpha1.ProbeAttributes, resultDetails *types.ResultDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails) error { switch strings.ToLower(probe.Mode) { @@ -343,7 +346,7 @@ func postChaosK8sProbe(probe v1alpha1.ProbeAttributes, resultDetails *types.Resu return nil } -//onChaosK8sProbe trigger the k8s probe for DuringChaos phase +// onChaosK8sProbe trigger the k8s 
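resourcesAbsent above tolerates only NotFound lookups; any other failure, or a resource that still exists, becomes a typed k8s-probe error. A minimal sketch of that decision, with a hypothetical getResource lookup standing in for the dynamic-client Get call:

package probeexample

import (
	"fmt"

	"github.com/litmuschaos/litmus-go/pkg/cerrors"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
)

// checkAbsent treats NotFound as success (the resource is expected to be gone)
// and converts every other outcome into a typed k8s-probe error.
func checkAbsent(probeName, res string, getResource func(string) error) error {
	if err := getResource(res); err != nil {
		if k8serrors.IsNotFound(err) {
			return nil
		}
		return cerrors.Error{
			ErrorCode: cerrors.ErrorTypeK8sProbe,
			Target:    fmt.Sprintf("{name: %v}", probeName),
			Reason:    fmt.Sprintf("unable to get the resources with name %v from k8s, err: %v", res, err),
		}
	}
	return cerrors.Error{
		ErrorCode: cerrors.ErrorTypeK8sProbe,
		Target:    fmt.Sprintf("{name: %v}", probeName),
		Reason:    fmt.Sprintf("resource '%v' still exists but is expected to be absent", res),
	}
}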
probe for DuringChaos phase func onChaosK8sProbe(probe v1alpha1.ProbeAttributes, resultDetails *types.ResultDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails) { switch strings.ToLower(probe.Mode) { @@ -388,6 +391,7 @@ loop: err = triggerK8sProbe(probe, clients, chaosresult) // record the error inside the probeDetails, we are maintaining a dedicated variable for the err, inside probeDetails if err != nil { + err = addProbePhase(err, string(chaosDetails.Phase)) for index := range chaosresult.ProbeDetails { if chaosresult.ProbeDetails[index].Name == probe.Name { chaosresult.ProbeDetails[index].IsProbeFailedWithError = err diff --git a/pkg/probe/probe.go b/pkg/probe/probe.go index 0ea009067..35b771206 100644 --- a/pkg/probe/probe.go +++ b/pkg/probe/probe.go @@ -10,11 +10,12 @@ import ( "github.com/kyokomi/emoji" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" + "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/retry" - "github.com/pkg/errors" + "github.com/palantir/stacktrace" "github.com/sirupsen/logrus" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -55,18 +56,18 @@ func RunProbes(chaosDetails *types.ChaosDetails, clients clients.ClientSets, res // execute the probes for the postchaos phase // it first evaluate the onchaos and continuous modes then it evaluates the other modes // as onchaos and continuous probes are already completed - var probeError []error + var probeError []string for _, probe := range probes { // evaluate continuous and onchaos probes switch strings.ToLower(probe.Mode) { case "onchaos", "continuous": if err := execute(probe, chaosDetails, clients, resultDetails, phase); err != nil { - probeError = append(probeError, err) + probeError = append(probeError, stacktrace.RootCause(err).Error()) } } } if len(probeError) != 0 { - return errors.Errorf("probes failed, err: %v", probeError) + return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s]", strings.Join(probeError, ","))} } // executes the eot and edge modes for _, probe := range probes { @@ -81,7 +82,7 @@ func RunProbes(chaosDetails *types.ChaosDetails, clients clients.ClientSets, res return nil } -//setProbeVerdict mark the verdict of the probe in the chaosresult as passed +// setProbeVerdict mark the verdict of the probe in the chaosresult as passed // on the basis of phase(pre/post chaos) func setProbeVerdict(resultDetails *types.ResultDetails, probe v1alpha1.ProbeAttributes, verdict v1alpha1.ProbeVerdict, description string) { for index, probes := range resultDetails.ProbeDetails { @@ -98,7 +99,7 @@ func setProbeVerdict(resultDetails *types.ResultDetails, probe v1alpha1.ProbeAtt } } -//SetProbeVerdictAfterFailure mark the verdict of all the failed/unrun probes as failed +// SetProbeVerdictAfterFailure mark the verdict of all the failed/unrun probes as failed func SetProbeVerdictAfterFailure(result *v1alpha1.ChaosResult) { for index := range result.Status.ProbeStatuses { if result.Status.ProbeStatuses[index].Status.Verdict == v1alpha1.ProbeVerdictAwaited { @@ -119,7 +120,7 @@ func getProbesFromEngine(chaosDetails *types.ChaosDetails, clients clients.Clien Try(func(attempt uint) error { engine, err := clients.LitmusClient.ChaosEngines(chaosDetails.ChaosNamespace).Get(context.Background(), chaosDetails.EngineName, v1.GetOptions{}) if err != nil { - return fmt.Errorf("unable to Get the chaosengine, err: %v", 
err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("unable to get the chaosengine, err: %v", err)} } // get all the probes defined inside chaosengine for the corresponding experiment for _, experiment := range engine.Spec.Experiments { @@ -136,7 +137,7 @@ func getProbesFromEngine(chaosDetails *types.ChaosDetails, clients clients.Clien } // InitializeProbesInChaosResultDetails set the probe inside chaos result -// it fetch the probe details from the chaosengine and set into the chaosresult +// it fetches the probe details from the chaosengine and set into the chaosresult func InitializeProbesInChaosResultDetails(chaosDetails *types.ChaosDetails, clients clients.ClientSets, chaosresult *types.ResultDetails) error { var probeDetails []types.ProbeDetails @@ -164,7 +165,7 @@ func InitializeProbesInChaosResultDetails(chaosDetails *types.ChaosDetails, clie return nil } -//getAndIncrementRunCount return the run count for the specified probe +// getAndIncrementRunCount return the run count for the specified probe func getAndIncrementRunCount(resultDetails *types.ResultDetails, probeName string) int { for index, probe := range resultDetails.ProbeDetails { if probeName == probe.Name { @@ -175,7 +176,7 @@ func getAndIncrementRunCount(resultDetails *types.ResultDetails, probeName strin return 0 } -//getRunIDFromProbe return the run_id for the dedicated probe +// getRunIDFromProbe return the run_id for the dedicated probe // which will used in the continuous cmd probe, run_id is used as suffix in the external pod name func getRunIDFromProbe(resultDetails *types.ResultDetails, probeName, probeType string) string { @@ -187,7 +188,7 @@ func getRunIDFromProbe(resultDetails *types.ResultDetails, probeName, probeType return "" } -//setRunIDForProbe set the run_id for the dedicated probe. +// setRunIDForProbe set the run_id for the dedicated probe. 
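RunProbes above now collects only the root cause of each failed continuous/onchaos probe and joins them into a single cerrors.PreserveError instead of a generic wrapped error. A minimal sketch of that aggregation, over a hypothetical slice of probe failures:

package probeexample

import (
	"fmt"
	"strings"

	"github.com/litmuschaos/litmus-go/pkg/cerrors"
	"github.com/palantir/stacktrace"
)

// aggregateProbeErrors mirrors the RunProbes aggregation: keep the root cause
// of every failed probe and return them as one PreserveError string.
func aggregateProbeErrors(failures []error) error {
	var probeError []string
	for _, err := range failures {
		if err != nil {
			probeError = append(probeError, stacktrace.RootCause(err).Error())
		}
	}
	if len(probeError) != 0 {
		return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s]", strings.Join(probeError, ","))}
	}
	return nil
}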
// which will used in the continuous cmd probe, run_id is used as suffix in the external pod name func setRunIDForProbe(resultDetails *types.ResultDetails, probeName, probeType, runid string) { @@ -236,7 +237,10 @@ func markedVerdictInEnd(err error, resultDetails *types.ResultDetails, probe v1a } setProbeVerdict(resultDetails, probe, probeVerdict, description) - if !probe.RunProperties.StopOnFailure { + if !probe.RunProperties.StopOnFailure && err != nil { + for index := range resultDetails.ProbeDetails { + resultDetails.ProbeDetails[index].IsProbeFailedWithError = err + } return nil } return err @@ -245,13 +249,13 @@ func markedVerdictInEnd(err error, resultDetails *types.ResultDetails, probe v1a func getDescription(mode, phase string) string { switch mode { case "edge": - return fmt.Sprintf("'%v' Probe didn't met the passing criteria", phase) + return fmt.Sprintf("Probe didn't met the passing criteria in phase: %s", phase) default: return "Probe didn't met the passing criteria" } } -//CheckForErrorInContinuousProbe check for the error in the continuous probes +// CheckForErrorInContinuousProbe check for the error in the continuous probes func checkForErrorInContinuousProbe(resultDetails *types.ResultDetails, probeName string) error { for index, probe := range resultDetails.ProbeDetails { @@ -273,7 +277,7 @@ func parseCommand(templatedCommand string, resultDetails *types.ResultDetails) ( // store the parsed output in the buffer var out bytes.Buffer if err := t.Execute(&out, register); err != nil { - return "", err + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("failed to parse the templated command, %s", err.Error())} } return out.String(), nil @@ -288,11 +292,15 @@ func stopChaosEngine(probe v1alpha1.ProbeAttributes, clients clients.ClientSets, //patch chaosengine's state to stop engine, err := clients.LitmusClient.ChaosEngines(chaosDetails.ChaosNamespace).Get(context.Background(), chaosDetails.EngineName, v1.GetOptions{}) if err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("failed to get chaosengine, %s", err.Error())} } engine.Spec.EngineState = v1alpha1.EngineStateStop _, err = clients.LitmusClient.ChaosEngines(chaosDetails.ChaosNamespace).Update(context.Background(), engine, v1.UpdateOptions{}) - return err + if err != nil { + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("failed to patch the chaosengine to `stop` state, %v", err.Error())} + } + + return nil } // execute contains steps to execute & evaluate probes in different modes at different phases @@ -301,25 +309,25 @@ func execute(probe v1alpha1.ProbeAttributes, chaosDetails *types.ChaosDetails, c case "k8sprobe": // it contains steps to prepare the k8s probe if err = prepareK8sProbe(probe, resultDetails, clients, phase, chaosDetails); err != nil { - return errors.Errorf("probes failed, err: %v", err) + return stacktrace.Propagate(err, "probes failed") } case "cmdprobe": // it contains steps to prepare cmd probe if err = prepareCmdProbe(probe, clients, chaosDetails, resultDetails, phase); err != nil { - return errors.Errorf("probes failed, err: %v", err) + return stacktrace.Propagate(err, "probes failed") } case "httpprobe": // it contains steps to prepare http probe if err = prepareHTTPProbe(probe, clients, chaosDetails, resultDetails, phase); err != nil { - return errors.Errorf("probes failed, err: %v", err) + return stacktrace.Propagate(err, "probes failed") } case "promprobe": // it contains steps to 
prepare prom probe if err = preparePromProbe(probe, clients, chaosDetails, resultDetails, phase); err != nil { - return errors.Errorf("probes failed, err: %v", err) + return stacktrace.Propagate(err, "probes failed") } default: - return errors.Errorf("No supported probe type found, type: %v", probe.Type) + return stacktrace.Propagate(err, "%v probe type not supported", probe.Type) } return nil } @@ -332,3 +340,12 @@ func getProbeVerdict(resultDetails *types.ResultDetails, name, probeType string) } return v1alpha1.ProbeVerdictNA } + +func addProbePhase(err error, phase string) error { + rootCause := stacktrace.RootCause(err) + if error, ok := rootCause.(cerrors.Error); ok { + error.Phase = phase + err = error + } + return err +} diff --git a/pkg/probe/promProbe.go b/pkg/probe/promProbe.go index f34f70478..26d950bf3 100644 --- a/pkg/probe/promProbe.go +++ b/pkg/probe/promProbe.go @@ -8,13 +8,13 @@ import ( "time" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" + "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/math" cmp "github.com/litmuschaos/litmus-go/pkg/probe/comparator" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/retry" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -36,12 +36,12 @@ func preparePromProbe(probe v1alpha1.ProbeAttributes, clients clients.ClientSets return err } default: - return errors.Errorf("phase '%s' not supported in the prom probe", phase) + return cerrors.Error{ErrorCode: cerrors.ErrorTypePromProbe, Target: fmt.Sprintf("{name: %v}", probe.Name), Reason: fmt.Sprintf("phase '%s' not supported in the prom probe", phase)} } return nil } -//preChaosPromProbe trigger the prometheus probe for prechaos phase +// preChaosPromProbe trigger the prometheus probe for prechaos phase func preChaosPromProbe(probe v1alpha1.ProbeAttributes, resultDetails *types.ResultDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails) error { switch strings.ToLower(probe.Mode) { @@ -93,7 +93,7 @@ func preChaosPromProbe(probe v1alpha1.ProbeAttributes, resultDetails *types.Resu return nil } -//postChaosPromProbe trigger the prometheus probe for postchaos phase +// postChaosPromProbe trigger the prometheus probe for postchaos phase func postChaosPromProbe(probe v1alpha1.ProbeAttributes, resultDetails *types.ResultDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails) error { switch strings.ToLower(probe.Mode) { @@ -139,7 +139,7 @@ func postChaosPromProbe(probe v1alpha1.ProbeAttributes, resultDetails *types.Res return nil } -//onChaosPromProbe trigger the prom probe for DuringChaos phase +// onChaosPromProbe trigger the prom probe for DuringChaos phase func onChaosPromProbe(probe v1alpha1.ProbeAttributes, resultDetails *types.ResultDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails) error { switch strings.ToLower(probe.Mode) { @@ -181,7 +181,7 @@ func triggerPromProbe(probe v1alpha1.ProbeAttributes, resultDetails *types.Resul } else if probe.PromProbeInputs.QueryPath != "" { command = "promql --host " + probe.PromProbeInputs.Endpoint + " \"$(cat " + probe.PromProbeInputs.QueryPath + ")\"" + " --output csv" } else { - return errors.Errorf("[Probe]: Any one of query or queryPath is required") + return cerrors.Error{ErrorCode: cerrors.ErrorTypePromProbe, Target: fmt.Sprintf("{name: %v}", probe.Name), Reason: "[Probe]: Any one of query or queryPath is 
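triggerPromProbe above shells out to the promql CLI and feeds the captured CSV output into the comparator. A minimal sketch of the command-execution half of that flow (the promql flags follow the patch; the shell invocation here is an assumption for illustration, and failures are reported in the same prom-probe error shape):

package probeexample

import (
	"bytes"
	"fmt"
	"os/exec"

	"github.com/litmuschaos/litmus-go/pkg/cerrors"
)

// runPromQuery runs a promql query (via a shell, for illustration) and returns
// its stdout, converting failures into the prom-probe error shape used above.
func runPromQuery(probeName, endpoint, query string) (string, error) {
	command := "promql --host " + endpoint + " \"" + query + "\" --output csv"
	var out, errOut bytes.Buffer
	cmd := exec.Command("/bin/sh", "-c", command)
	cmd.Stdout = &out
	cmd.Stderr = &errOut
	if err := cmd.Run(); err != nil {
		return "", cerrors.Error{
			ErrorCode: cerrors.ErrorTypePromProbe,
			Target:    fmt.Sprintf("{name: %v}", probeName),
			Reason:    fmt.Sprintf("unable to run command, err: %v; error output: %v", err, errOut.String()),
		}
	}
	return out.String(), nil
}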
required"} } var out, errOut bytes.Buffer @@ -190,11 +190,11 @@ func triggerPromProbe(probe v1alpha1.ProbeAttributes, resultDetails *types.Resul cmd.Stdout = &out cmd.Stderr = &errOut if err := cmd.Run(); err != nil { - return fmt.Errorf("unable to run command, err: %v; error output: %v", err, errOut.String()) + return cerrors.Error{ErrorCode: cerrors.ErrorTypePromProbe, Target: fmt.Sprintf("{name: %v}", probe.Name), Reason: fmt.Sprintf("unable to run command, err: %v; error output: %v", err, errOut.String())} } // extract the values from the metrics - value, err := extractValueFromMetrics(strings.TrimSpace(out.String())) + value, err := extractValueFromMetrics(strings.TrimSpace(out.String()), probe.Name) if err != nil { return err } @@ -205,7 +205,8 @@ func triggerPromProbe(probe v1alpha1.ProbeAttributes, resultDetails *types.Resul FirstValue(value). SecondValue(probe.PromProbeInputs.Comparator.Value). Criteria(probe.PromProbeInputs.Comparator.Criteria). - CompareFloat(); err != nil { + ProbeName(probe.Name). + CompareFloat(cerrors.ErrorTypePromProbe); err != nil { log.Errorf("The %v prom probe has been Failed, err: %v", probe.Name, err) return err } @@ -230,6 +231,7 @@ loop: err = triggerPromProbe(probe, chaosresult) // record the error inside the probeDetails, we are maintaining a dedicated variable for the err, inside probeDetails if err != nil { + err = addProbePhase(err, string(chaosDetails.Phase)) for index := range chaosresult.ProbeDetails { if chaosresult.ProbeDetails[index].Name == probe.Name { chaosresult.ProbeDetails[index].IsProbeFailedWithError = err @@ -278,6 +280,7 @@ loop: default: // record the error inside the probeDetails, we are maintaining a dedicated variable for the err, inside probeDetails if err = triggerPromProbe(probe, chaosresult); err != nil { + err = addProbePhase(err, string(chaosDetails.Phase)) for index := range chaosresult.ProbeDetails { if chaosresult.ProbeDetails[index].Name == probe.Name { chaosresult.ProbeDetails[index].IsProbeFailedWithError = err @@ -302,7 +305,7 @@ loop: } // extractValueFromMetrics extract the value field from the prometheus metrix -func extractValueFromMetrics(metrics string) (string, error) { +func extractValueFromMetrics(metrics, probeName string) (string, error) { // spliting the metrics based on newline as metrics may have multiple entries rows := strings.Split(metrics, "\n") @@ -310,9 +313,9 @@ func extractValueFromMetrics(metrics string) (string, error) { // output should contains exact one metrics entry along with header // erroring out the cases where it contains more or less entries if len(rows) > 2 { - return "", errors.Errorf("metrics entries can't be more than two") + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypePromProbe, Target: fmt.Sprintf("{name: %v}", probeName), Reason: "metrics entries can't be more than two"} } else if len(rows) < 2 { - return "", errors.Errorf("metrics doesn't contains required values") + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypePromProbe, Target: fmt.Sprintf("{name: %v}", probeName), Reason: "metrics doesn't contains required values"} } // deriving the index for the value column from the headers @@ -325,13 +328,13 @@ func extractValueFromMetrics(metrics string) (string, error) { } } if indexForValueColumn == -1 { - return "", errors.Errorf("metrics entries doesn't contains value column") + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypePromProbe, Target: fmt.Sprintf("{name: %v}", probeName), Reason: "metrics entries doesn't contains value column"} } // splitting the 
metrics entries which are available as comma separated values := strings.Split(rows[1], ",") if values[indexForValueColumn] == "" { - return "", errors.Errorf("error while parsing value from derived matrics") + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypePromProbe, Target: fmt.Sprintf("{name: %v}", probeName), Reason: "error while parsing value from derived matrics"} } return values[indexForValueColumn], nil } diff --git a/pkg/result/chaosresult.go b/pkg/result/chaosresult.go index c8ff4d792..d949b868e 100644 --- a/pkg/result/chaosresult.go +++ b/pkg/result/chaosresult.go @@ -3,11 +3,15 @@ package result import ( "bytes" "context" + "fmt" "os/exec" "strconv" "strings" "time" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/palantir/stacktrace" + "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" clients "github.com/litmuschaos/litmus-go/pkg/clients" @@ -16,17 +20,16 @@ import ( "github.com/litmuschaos/litmus-go/pkg/probe" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/retry" - "github.com/pkg/errors" k8serrors "k8s.io/apimachinery/pkg/api/errors" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -//ChaosResult Create and Update the chaos result +// ChaosResult Create and Update the chaos result func ChaosResult(chaosDetails *types.ChaosDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, state string) error { experimentLabel := map[string]string{} - // It try to get the chaosresult, if available - // it will retries until it got chaos result or met the timeout(3 mins) + // It tries to get the chaosresult, if available + // it will retry until it got chaos result or met the timeout(3 mins) isResultAvailable := false if err := retry. Times(90). @@ -34,7 +37,7 @@ func ChaosResult(chaosDetails *types.ChaosDetails, clients clients.ClientSets, r Try(func(attempt uint) error { _, err := clients.LitmusClient.ChaosResults(chaosDetails.ChaosNamespace).Get(context.Background(), resultDetails.Name, v1.GetOptions{}) if err != nil && !k8serrors.IsNotFound(err) { - return errors.Errorf("unable to get %v chaosresult in %v namespace, err: %v", resultDetails.Name, chaosDetails.ChaosNamespace, err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosResultCRUD, Target: fmt.Sprintf("{name: %s, namespace: %s}", resultDetails.Name, chaosDetails.ChaosNamespace), Reason: err.Error()} } else if err == nil { isResultAvailable = true } @@ -49,7 +52,7 @@ func ChaosResult(chaosDetails *types.ChaosDetails, clients clients.ClientSets, r // Getting chaos pod label and passing it in chaos result chaosPod, err := clients.KubeClient.CoreV1().Pods(chaosDetails.ChaosNamespace).Get(context.Background(), chaosDetails.ChaosPodName, v1.GetOptions{}) if err != nil { - return errors.Errorf("failed to find chaos pod with name: %v, err: %v", chaosDetails.ChaosPodName, err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{name: %s, namespace: %s}", chaosDetails.ChaosPodName, chaosDetails.ChaosNamespace), Reason: fmt.Sprintf("failed to get experiment pod :%s", err.Error())} } experimentLabel = chaosPod.Labels } @@ -71,7 +74,7 @@ func ChaosResult(chaosDetails *types.ChaosDetails, clients clients.ClientSets, r return PatchChaosResult(clients, chaosDetails, resultDetails, experimentLabel) } -//InitializeChaosResult create the chaos result +// InitializeChaosResult create the chaos result func InitializeChaosResult(chaosDetails *types.ChaosDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, 
chaosResultLabel map[string]string) error { _, probeStatus := GetProbeStatus(resultDetails) @@ -107,24 +110,24 @@ func InitializeChaosResult(chaosDetails *types.ChaosDetails, clients clients.Cli // if the chaos result is already present, it will patch the new parameters with the existing chaos result CR // Note: We have added labels inside chaos result and looking for matching labels to list the chaos-result - // these labels were not present inside earlier releases so giving a retry/update if someone have a exiting result CR + // these labels were not present inside earlier releases so giving a retry/update if someone has an exiting result CR // in his cluster, which was created earlier with older release/version of litmus. // it will override the params and add the labels to it so that it will work as desired. if k8serrors.IsAlreadyExists(err) { - chaosResult, err = clients.LitmusClient.ChaosResults(chaosDetails.ChaosNamespace).Get(context.Background(), resultDetails.Name, v1.GetOptions{}) + _, err = clients.LitmusClient.ChaosResults(chaosDetails.ChaosNamespace).Get(context.Background(), resultDetails.Name, v1.GetOptions{}) if err != nil { - return errors.Errorf("Unable to find the chaosresult with name %v in %v namespace, err: %v", resultDetails.Name, chaosDetails.ChaosNamespace, err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosResultCRUD, Target: fmt.Sprintf("{name: %s, namespace: %s}", resultDetails.Name, chaosDetails.ChaosNamespace), Reason: err.Error()} } // updating the chaosresult with new values if err = PatchChaosResult(clients, chaosDetails, resultDetails, chaosResultLabel); err != nil { - return err + return stacktrace.Propagate(err, "could not update chaos result") } } return nil } -//GetProbeStatus fetch status of all probes +// GetProbeStatus fetch status of all probes func GetProbeStatus(resultDetails *types.ResultDetails) (bool, []v1alpha1.ProbeStatuses) { isAllProbePassed := true @@ -143,17 +146,42 @@ func GetProbeStatus(resultDetails *types.ResultDetails) (bool, []v1alpha1.ProbeS return isAllProbePassed, probeStatus } +func getFailStep(probeDetails []types.ProbeDetails, phase string) (string, string) { + var ( + errList []string + errCode cerrors.ErrorType + rootCause string + ) + for _, probe := range probeDetails { + if probe.IsProbeFailedWithError != nil { + rootCause, errCode = cerrors.GetRootCauseAndErrorCode(probe.IsProbeFailedWithError, phase) + errList = append(errList, rootCause) + } + } + + if len(errList) != 0 { + if len(errList) == 1 { + return errList[0], string(errCode) + } + return fmt.Sprintf("[%v]", strings.Join(errList, ",")), string(cerrors.ErrorTypeGeneric) + } + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: "probe didn't met the passing criteria"}.Error(), string(cerrors.ErrorTypeGeneric) +} + func updateResultAttributes(clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, chaosResultLabel map[string]string) (*v1alpha1.ChaosResult, error) { result, err := GetChaosStatus(resultDetails, chaosDetails, clients) if err != nil { - return nil, err + return nil, stacktrace.Propagate(err, "could not get chaos status") } updateHistory(result) var isAllProbePassed bool result.Status.ExperimentStatus.Phase = resultDetails.Phase result.Spec.InstanceID = chaosDetails.InstanceID - result.Status.ExperimentStatus.FailStep = resultDetails.FailStep + if resultDetails.FailureOutput != nil || resultDetails.Phase == v1alpha1.ResultPhaseRunning { + result.Status.ExperimentStatus.FailureOutput = 
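getFailStep above collapses the failed probes into one FailedStep string for the chaosresult: a single failure keeps its own error code, while several failures are joined as "[a,b]" under the generic code. A minimal sketch of that joining rule, without the ProbeDetails plumbing:

package resultexample

import (
	"fmt"
	"strings"

	"github.com/litmuschaos/litmus-go/pkg/cerrors"
)

// joinFailSteps mirrors the getFailStep rule: one root cause is reported as-is
// with its own code, several are bracketed and reported under the generic code.
func joinFailSteps(rootCauses []string, singleCode cerrors.ErrorType) (string, string) {
	if len(rootCauses) == 0 {
		// no recorded probe error: fall back to a generic failure, as getFailStep does
		return "probe didn't meet the passing criteria", string(cerrors.ErrorTypeGeneric)
	}
	if len(rootCauses) == 1 {
		return rootCauses[0], string(singleCode)
	}
	return fmt.Sprintf("[%v]", strings.Join(rootCauses, ",")), string(cerrors.ErrorTypeGeneric)
}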
resultDetails.FailureOutput + } + // for existing chaos result resource it will patch the label result.ObjectMeta.Labels = chaosResultLabel result.Status.History.Targets = chaosDetails.Targets @@ -165,7 +193,11 @@ func updateResultAttributes(clients clients.ClientSets, chaosDetails *types.Chao if !isAllProbePassed { resultDetails.Verdict = "Fail" result.Status.ExperimentStatus.Verdict = "Fail" - result.Status.ExperimentStatus.FailStep = "Probe execution result didn't met the passing criteria" + failStep, errCode := getFailStep(resultDetails.ProbeDetails, string(chaosDetails.Phase)) + result.Status.ExperimentStatus.FailureOutput = &v1alpha1.FailureOutput{ + FailedStep: failStep, + ErrorCode: errCode, + } } switch strings.ToLower(string(resultDetails.Verdict)) { case "pass": @@ -194,16 +226,16 @@ func updateResultAttributes(clients clients.ClientSets, chaosDetails *types.Chao return result, nil } -//PatchChaosResult Update the chaos result +// PatchChaosResult Update the chaos result func PatchChaosResult(clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, chaosResultLabel map[string]string) error { result, err := updateResultAttributes(clients, chaosDetails, resultDetails, chaosResultLabel) if err != nil { - return err + return stacktrace.Propagate(err, "could not update chaosresult attributes") } // It will update the existing chaos-result CR with new values - // it will retries until it will be able to update successfully or met the timeout(3 mins) + // it will retry until it will be able to update successfully or met the timeout(3 mins) return retry. Times(uint(chaosDetails.Timeout / chaosDetails.Delay)). Wait(time.Duration(chaosDetails.Delay) * time.Second). @@ -213,10 +245,10 @@ func PatchChaosResult(clients clients.ClientSets, chaosDetails *types.ChaosDetai if k8serrors.IsConflict(updateErr) { result, err = updateResultAttributes(clients, chaosDetails, resultDetails, chaosResultLabel) if err != nil { - return err + return stacktrace.Propagate(err, "could not update chaosresult attributes") } } - return errors.Errorf("Unable to update the chaosresult, err: %v", updateErr) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosResultCRUD, Phase: getExperimentPhaseFromResultPhase(resultDetails.Phase), Target: fmt.Sprintf("{name: %s, namespace: %s}", resultDetails.Name, chaosDetails.ChaosNamespace), Reason: updateErr.Error()} } return nil }) @@ -227,29 +259,36 @@ func SetResultUID(resultDetails *types.ResultDetails, clients clients.ClientSets result, err := clients.LitmusClient.ChaosResults(chaosDetails.ChaosNamespace).Get(context.Background(), resultDetails.Name, v1.GetOptions{}) if err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosResultCRUD, Target: fmt.Sprintf("{name: %s, namespace: %s}", resultDetails.Name, chaosDetails.ChaosNamespace), Reason: err.Error()} } resultDetails.ResultUID = result.UID return nil } -//RecordAfterFailure update the chaosresult and create the summary events -func RecordAfterFailure(chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, failStep string, clients clients.ClientSets, eventsDetails *types.EventDetails) { +// RecordAfterFailure update the chaosresult and create the summary events +func RecordAfterFailure(chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, err error, clients clients.ClientSets, eventsDetails *types.EventDetails) { + failStep, errorCode := cerrors.GetRootCauseAndErrorCode(err, string(chaosDetails.Phase)) // update the chaos 
result - types.SetResultAfterCompletion(resultDetails, "Fail", "Completed", failStep) - ChaosResult(chaosDetails, clients, resultDetails, "EOT") + types.SetResultAfterCompletion(resultDetails, "Fail", "Completed", failStep, errorCode) + if err := ChaosResult(chaosDetails, clients, resultDetails, "EOT"); err != nil { + log.Errorf("failed to update chaosresult, err: %v", err) + } // add the summary event in chaos result msg := "experiment: " + chaosDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict) types.SetResultEventAttributes(eventsDetails, types.FailVerdict, msg, "Warning", resultDetails) - events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosResult") + if err := events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosResult"); err != nil { + log.Errorf("failed to create %v event inside chaosresult", types.FailVerdict) + } // add the summary event in chaos engine if chaosDetails.EngineName != "" { types.SetEngineEventAttributes(eventsDetails, types.Summary, msg, "Warning", chaosDetails) - events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine") + if err := events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine"); err != nil { + log.Errorf("failed to create %v event inside chaosengine", types.Summary) + } } } @@ -275,7 +314,7 @@ func AnnotateChaosResult(resultName, namespace, status, kind, name string) error command.Stderr = &stderr if err := command.Run(); err != nil { log.Infof("Error String: %v", stderr.String()) - return errors.Errorf("unable to annotate the %v chaosresult, err: %v", resultName, err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosResultCRUD, Target: fmt.Sprintf("{name: %s, namespace: %s}", resultName, namespace), Reason: out.String()} } return nil } @@ -285,7 +324,7 @@ func GetChaosStatus(resultDetails *types.ResultDetails, chaosDetails *types.Chao result, err := clients.LitmusClient.ChaosResults(chaosDetails.ChaosNamespace).Get(context.Background(), resultDetails.Name, v1.GetOptions{}) if err != nil { - return nil, err + return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosResultCRUD, Target: fmt.Sprintf("{name: %s, namespace: %s}", resultDetails.Name, chaosDetails.ChaosNamespace), Reason: err.Error()} } annotations := result.ObjectMeta.Annotations targetList := chaosDetails.Targets @@ -310,6 +349,35 @@ func GetChaosStatus(resultDetails *types.ResultDetails, chaosDetails *types.Chao return result, nil } +func UpdateFailedStepFromHelper(resultDetails *types.ResultDetails, chaosDetails *types.ChaosDetails, client clients.ClientSets, err error) error { + rootCause, errCode := cerrors.GetRootCauseAndErrorCode(err, string(chaosDetails.Phase)) + return retry. + Times(uint(chaosDetails.Timeout / chaosDetails.Delay)). + Wait(time.Duration(chaosDetails.Delay) * time.Second). 
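RecordAfterFailure now receives the raw error (rather than a pre-formatted fail step) and derives the fail step and error code itself, and UpdateFailedStepFromHelper below retries the chaosresult patch until it sticks. A sketch of the retry pattern both rely on, with a hypothetical patchOnce function standing in for the Get/Update round trip:

package resultexample

import (
	"time"

	"github.com/litmuschaos/litmus-go/pkg/utils/retry"
)

// withRetry retries patchOnce until it succeeds or the budget of
// timeout/delay attempts (delay seconds apart) is exhausted, matching the
// retry.Times(...).Wait(...).Try(...) usage in this file.
func withRetry(timeout, delay int, patchOnce func() error) error {
	return retry.
		Times(uint(timeout / delay)).
		Wait(time.Duration(delay) * time.Second).
		Try(func(attempt uint) error {
			return patchOnce()
		})
}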
+ Try(func(attempt uint) error { + chaosResult, err := client.LitmusClient.ChaosResults(chaosDetails.ChaosNamespace).Get(context.Background(), resultDetails.Name, v1.GetOptions{}) + if err != nil { + return err + } + if chaosResult.Status.ExperimentStatus.FailureOutput != nil { + chaosResult.Status.ExperimentStatus.FailureOutput.FailedStep = appendFailStep(chaosResult.Status.ExperimentStatus.FailureOutput.FailedStep, rootCause) + } else { + chaosResult.Status.ExperimentStatus.FailureOutput = &v1alpha1.FailureOutput{ + FailedStep: rootCause, + ErrorCode: string(errCode), + } + } + _, err = client.LitmusClient.ChaosResults(chaosDetails.ChaosNamespace).Update(context.Background(), chaosResult, v1.UpdateOptions{}) + return err + }) +} + +func appendFailStep(failStep string, rootCause string) string { + failStep = strings.TrimPrefix(failStep, "[") + failStep = strings.TrimSuffix(failStep, "]") + return fmt.Sprintf("[%s,%s]", failStep, rootCause) +} + // updates the chaos status of targets which is already present inside history.targets func updateTargets(name, status string, data []v1alpha1.TargetDetails) bool { for i := range data { @@ -320,3 +388,15 @@ func updateTargets(name, status string, data []v1alpha1.TargetDetails) bool { } return false } + +func getExperimentPhaseFromResultPhase(phase v1alpha1.ResultPhase) string { + switch phase { + case v1alpha1.ResultPhaseRunning: + return "PreChaos" + case v1alpha1.ResultPhaseCompleted: + return "PostChaos" + case v1alpha1.ResultPhaseStopped: + return "Abort" + } + return "" +} diff --git a/pkg/spring-boot/spring-boot-chaos/environment/environment.go b/pkg/spring-boot/spring-boot-chaos/environment/environment.go index 43a660ba3..5ba3e0f45 100644 --- a/pkg/spring-boot/spring-boot-chaos/environment/environment.go +++ b/pkg/spring-boot/spring-boot-chaos/environment/environment.go @@ -19,7 +19,6 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails, expName string experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "30")) experimentDetails.ChaosInterval, _ = strconv.Atoi(types.Getenv("CHAOS_INTERVAL", "10")) experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) - experimentDetails.ChaosLib = types.Getenv("LIB", "litmus") experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", "")) experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "") experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "") diff --git a/pkg/spring-boot/spring-boot-chaos/types/types.go b/pkg/spring-boot/spring-boot-chaos/types/types.go index 4a6765997..92a98cc1e 100644 --- a/pkg/spring-boot/spring-boot-chaos/types/types.go +++ b/pkg/spring-boot/spring-boot-chaos/types/types.go @@ -12,7 +12,6 @@ type ExperimentDetails struct { ChaosDuration int ChaosInterval int RampTime int - ChaosLib string AppNS string AppLabel string AppKind string diff --git a/pkg/status/application.go b/pkg/status/application.go index ac2ebf357..bffe47ac6 100644 --- a/pkg/status/application.go +++ b/pkg/status/application.go @@ -2,15 +2,18 @@ package status import ( "context" + "fmt" "strings" "time" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/palantir/stacktrace" + clients "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/retry" "github.com/litmuschaos/litmus-go/pkg/workloads" - "github.com/pkg/errors" logrus "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" @@ -31,19 +34,19 @@ func AUTStatusCheck(clients clients.ClientSets, chaosDetails *types.ChaosDetails case "pod": for _, name := range target.Names { if err := CheckApplicationStatusesByPodName(target.Namespace, name, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return err + return stacktrace.Propagate(err, "could not check application status by pod names") } } default: if target.Labels != nil { for _, label := range target.Labels { if err := CheckApplicationStatusesByLabels(target.Namespace, label, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return err + return stacktrace.Propagate(err, "could not check application status by labels") } } } else { if err := CheckApplicationStatusesByWorkloadName(target, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { - return err + return stacktrace.Propagate(err, "could not check application status by workload names") } } } @@ -63,12 +66,12 @@ func CheckApplicationStatusesByLabels(appNs, appLabel string, timeout, delay int // Checking whether application containers are in ready state log.Info("[Status]: Checking whether application containers are in ready state") if err := CheckContainerStatus(appNs, appLabel, "", timeout, delay, clients); err != nil { - return err + return stacktrace.Propagate(err, "could not check container status") } // Checking whether application pods are in running state log.Info("[Status]: Checking whether application pods are in running state") if err := CheckPodStatus(appNs, appLabel, timeout, delay, clients); err != nil { - return err + return stacktrace.Propagate(err, "could not check pod status") } } return nil @@ -82,7 +85,7 @@ func CheckAuxiliaryApplicationStatus(AuxiliaryAppDetails string, timeout, delay for _, val := range AuxiliaryAppInfo { AppInfo := strings.Split(val, ":") if err := CheckApplicationStatusesByLabels(AppInfo[0], AppInfo[1], timeout, delay, clients); err != nil { - return err + return stacktrace.Propagate(err, "could not check auxiliary application status") } } return nil @@ -96,15 +99,15 @@ func CheckPodStatusPhase(appNs, appLabel string, timeout, delay int, clients cli Try(func(attempt uint) error { podList, err := clients.KubeClient.CoreV1().Pods(appNs).List(context.Background(), metav1.ListOptions{LabelSelector: appLabel}) if err != nil { - return errors.Errorf("Unable to find the pods with matching labels, err: %v", err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{podLabels: %s, namespace: %s}", appLabel, appNs), Reason: err.Error()} } else if len(podList.Items) == 0 { - errors.Errorf("Unable to find the pods with matching labels") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{podLabels: %s, namespace: %s}", appLabel, appNs), Reason: "no pod found with matching labels"} } for _, pod := range podList.Items { isInState := isOneOfState(string(pod.Status.Phase), states) if !isInState { - return errors.Errorf("Pod is not yet in %v state(s)", states) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, appNs), Reason: fmt.Sprintf("pod is not in [%v] states", states)} } log.InfoWithValues("[Status]: The status of Pods are as follows", logrus.Fields{ "Pod": pod.Name, "Status": pod.Status.Phase}) @@ -137,9 +140,9 @@ func CheckContainerStatus(appNs, appLabel, containerName string, timeout, delay Try(func(attempt uint) error { podList, err := 
clients.KubeClient.CoreV1().Pods(appNs).List(context.Background(), metav1.ListOptions{LabelSelector: appLabel}) if err != nil { - return errors.Errorf("Unable to find the pods with matching labels, err: %v", err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{podLabels: %s, namespace: %v}", appLabel, appNs), Reason: err.Error()} } else if len(podList.Items) == 0 { - return errors.Errorf("Unable to find the pods with matching labels") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{podLabels: %s, namespace: %v}", appLabel, appNs), Reason: "no pod found with matching labels"} } for _, pod := range podList.Items { switch containerName { @@ -162,10 +165,10 @@ func validateContainerStatus(containerName, podName string, ContainerStatuses [] for _, container := range ContainerStatuses { if container.Name == containerName { if container.State.Terminated != nil { - return errors.Errorf("container is in terminated state") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("podName: %s, containerName: %s", podName, containerName), Reason: "container is in terminated state"} } if !container.Ready { - return errors.Errorf("containers are not yet in running state") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("podName: %s, containerName: %s", podName, containerName), Reason: "container is not in running state"} } log.InfoWithValues("[Status]: The Container status are as follows", logrus.Fields{ "container": container.Name, "Pod": podName, "Readiness": container.Ready}) @@ -189,20 +192,20 @@ func WaitForCompletion(appNs, appLabel string, clients clients.ClientSets, durat var podStatus string failedPods := 0 // It will wait till the completion of target container - // it will retries until the target container completed or met the timeout(chaos duration) + // it will retry until the target container completed or met the timeout(chaos duration) err := retry. Times(uint(duration)). Wait(1 * time.Second). 
Try(func(attempt uint) error { podList, err := clients.KubeClient.CoreV1().Pods(appNs).List(context.Background(), metav1.ListOptions{LabelSelector: appLabel}) if err != nil { - return errors.Errorf("Unable to find the pods with matching labels, err: %v", err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{podLabel: %s, namespace: %s}", appLabel, appNs), Reason: err.Error()} } else if len(podList.Items) == 0 { - return errors.Errorf("Unable to find the pods with matching labels") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{podLabel: %s, namespace: %s}", appLabel, appNs), Reason: "no pod with matching label"} } - // it will check for the status of helper pod, if it is Succeeded and target container is completed then it will marked it as completed and return + // it will check for the status of helper pod, if it is Succeeded and target container is completed then it will mark it as completed and return // if it is still running then it will check for the target container, as we can have multiple container inside helper pod (istio) - // if the target container is in completed state(ready flag is false), then we will marked the helper pod as completed + // if the target container is in completed state(ready flag is false), then we will mark the helper pod as completed // we will retry till it met the timeout(chaos duration) failedPods = 0 for _, pod := range podList.Items { @@ -212,7 +215,7 @@ func WaitForCompletion(appNs, appLabel string, clients clients.ClientSets, durat for _, container := range pod.Status.ContainerStatuses { if container.Name == containerName { if container.Ready { - return errors.Errorf("Container is not completed yet") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", pod.Name, pod.Namespace, container.Name), Reason: "container is not completed within timeout"} } else if container.State.Terminated != nil && container.State.Terminated.ExitCode == 1 { podStatus = "Failed" break @@ -221,7 +224,7 @@ func WaitForCompletion(appNs, appLabel string, clients clients.ClientSets, durat } } if podStatus == "Pending" { - return errors.Errorf("pod is in pending state") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: "pod is in pending state"} } log.InfoWithValues("[Status]: The running status of Pods are as follows", logrus.Fields{ "Pod": pod.Name, "Status": podStatus}) @@ -247,9 +250,9 @@ func CheckHelperStatus(appNs, appLabel string, timeout, delay int, clients clien Try(func(attempt uint) error { podList, err := clients.KubeClient.CoreV1().Pods(appNs).List(context.Background(), metav1.ListOptions{LabelSelector: appLabel}) if err != nil { - return errors.Errorf("unable to find the pods with matching labels, err: %v", err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{podLabel: %s, namespace: %s}", appLabel, appNs), Reason: fmt.Sprintf("helper status check failed: %s", err.Error())} } else if len(podList.Items) == 0 { - errors.Errorf("Unable to find the pods with matching labels") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{podLabel: %s, namespace: %s}", appLabel, appNs), Reason: "helper status check failed: no pods found with mathcing labels"} } for _, pod := range podList.Items { podStatus := string(pod.Status.Phase) @@ -257,11 +260,11 
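The status-check helpers above fail with typed cerrors carrying ErrorTypeStatusChecks, and their callers re-wrap them with stacktrace.Propagate; the original target and reason stay recoverable from the root cause. A minimal sketch of both sides, with a hypothetical checkPod standing in for a low-level check:

package statusexample

import (
	"fmt"

	"github.com/litmuschaos/litmus-go/pkg/cerrors"
	"github.com/palantir/stacktrace"
)

// checkPod stands in for a low-level status check that fails with a typed error.
func checkPod(name, namespace string) error {
	return cerrors.Error{
		ErrorCode: cerrors.ErrorTypeStatusChecks,
		Target:    fmt.Sprintf("{podName: %s, namespace: %s}", name, namespace),
		Reason:    "pod is not in Running state",
	}
}

// checkApplication adds context the way the callers above do; stacktrace.RootCause
// on the returned error still yields the original typed cerrors.Error.
func checkApplication(name, namespace string) error {
	if err := checkPod(name, namespace); err != nil {
		return stacktrace.Propagate(err, "could not check pod status by pod name")
	}
	return nil
}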
@@ func CheckHelperStatus(appNs, appLabel string, timeout, delay int, clients clien case "running", "succeeded", "failed": log.Infof("%v helper pod is in %v state", pod.Name, podStatus) default: - return errors.Errorf("%v pod is in %v state", pod.Name, podStatus) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: fmt.Sprintf("helper pod is in %s state", podStatus)} } for _, container := range pod.Status.ContainerStatuses { if container.State.Terminated != nil && container.State.Terminated.Reason != "Completed" && container.State.Terminated.Reason != "Error" { - return errors.Errorf("container is terminated with %v reason", container.State.Terminated.Reason) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: fmt.Sprintf("helper pod's container is in terminated state with %s reason", container.State.Terminated.Reason)} } } } @@ -276,11 +279,11 @@ func CheckPodStatusByPodName(appNs, appName string, timeout, delay int, clients Try(func(attempt uint) error { pod, err := clients.KubeClient.CoreV1().Pods(appNs).Get(context.Background(), appName, metav1.GetOptions{}) if err != nil { - return errors.Errorf("Unable to find the pods with matching labels, err: %v", err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("podName: %v, namespace: %v", appName, appNs), Reason: err.Error()} } if pod.Status.Phase != v1.PodRunning { - return errors.Errorf("Pod is not yet in %v state(s)", v1.PodRunning) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("podName: %v, namespace: %v", appName, appNs), Reason: "pod is not in Running state"} } log.InfoWithValues("[Status]: The status of Pods are as follows", logrus.Fields{ "Pod": pod.Name, "Status": pod.Status.Phase}) @@ -296,7 +299,7 @@ func CheckAllContainerStatusesByPodName(appNs, appName string, timeout, delay in Try(func(attempt uint) error { pod, err := clients.KubeClient.CoreV1().Pods(appNs).Get(context.Background(), appName, metav1.GetOptions{}) if err != nil { - return errors.Errorf("Unable to find the pods with matching labels, err: %v", err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("podName: %v, namespace: %v", appName, appNs), Reason: err.Error()} } if err := validateAllContainerStatus(pod.Name, pod.Status.ContainerStatuses); err != nil { return err @@ -309,11 +312,11 @@ func CheckApplicationStatusesByWorkloadName(target types.AppDetails, timeout, de pods, err := workloads.GetPodsFromWorkloads(target, clients) if err != nil { - return err + return stacktrace.Propagate(err, "could not get pods from workloads") } for _, pod := range pods.Items { if err := CheckApplicationStatusesByPodName(target.Namespace, pod.Name, timeout, delay, clients); err != nil { - return err + return stacktrace.Propagate(err, "could not check application status by pod name") } } return nil @@ -323,12 +326,12 @@ func CheckUnTerminatedPodStatusesByWorkloadName(target types.AppDetails, timeout pods, err := workloads.GetPodsFromWorkloads(target, clients) if err != nil { - return err + return stacktrace.Propagate(err, "could not get pods by workload names") } for _, pod := range pods.Items { if pod.DeletionTimestamp == nil { if err := CheckApplicationStatusesByPodName(target.Namespace, pod.Name, timeout, delay, clients); err != nil { - return err + return 
stacktrace.Propagate(err, "could not check application status by pod name") } } } @@ -339,12 +342,12 @@ func CheckApplicationStatusesByPodName(appNs, pod string, timeout, delay int, cl // Checking whether application containers are in ready state log.Info("[Status]: Checking whether application containers are in ready state") if err := CheckAllContainerStatusesByPodName(appNs, pod, timeout, delay, clients); err != nil { - return err + return stacktrace.Propagate(err, "could not check container statuses by pod name") } // Checking whether application pods are in running state log.Info("[Status]: Checking whether application pods are in running state") if err := CheckPodStatusByPodName(appNs, pod, timeout, delay, clients); err != nil { - return err + return stacktrace.Propagate(err, "could not check pod status by pod name") } return nil } diff --git a/pkg/status/nodes.go b/pkg/status/nodes.go index 1504cfe80..e287bf858 100644 --- a/pkg/status/nodes.go +++ b/pkg/status/nodes.go @@ -2,16 +2,17 @@ package status import ( "context" + "fmt" "strings" "time" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + clients "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/utils/retry" - "github.com/pkg/errors" logrus "github.com/sirupsen/logrus" apiv1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -28,18 +29,14 @@ func CheckNodeStatus(nodes string, timeout, delay int, clients clients.ClientSet for index := range targetNodes { node, err := clients.KubeClient.CoreV1().Nodes().Get(context.Background(), targetNodes[index], metav1.GetOptions{}) if err != nil { - if apierrors.IsNotFound(err) { - return errors.Errorf("[Info]: The node: %v does not exist", targetNodes[index]) - } else { - return err - } + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{nodeName: %s}", targetNodes[index]), Reason: err.Error()} } nodeList.Items = append(nodeList.Items, *node) } } else { nodes, err := clients.KubeClient.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) if err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Reason: fmt.Sprintf("failed to list all nodes: %s", err.Error())} } nodeList = *nodes } @@ -53,7 +50,7 @@ func CheckNodeStatus(nodes string, timeout, delay int, clients clients.ClientSet } } if !isReady { - return errors.Errorf("Node is not in ready state") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{nodeName: %s}", node.Name), Reason: "node is not in ready state"} } log.InfoWithValues("The Node status are as follows", logrus.Fields{ "Node": node.Name, "Ready": isReady}) @@ -70,7 +67,7 @@ func CheckNodeNotReadyState(nodeName string, timeout, delay int, clients clients Try(func(attempt uint) error { node, err := clients.KubeClient.CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{}) if err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{nodeName: %s}", nodeName), Reason: err.Error()} } conditions := node.Status.Conditions isReady := false @@ -80,9 +77,9 @@ func CheckNodeNotReadyState(nodeName string, timeout, delay int, clients clients break } } - // It will retries until the node becomes NotReady + // It will retry until the node becomes NotReady if isReady { - return errors.Errorf("Node is not in NotReady state") + return cerrors.Error{ErrorCode: 
cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{nodeName: %s}", nodeName), Reason: "node is not in NotReady state during chaos"} } log.InfoWithValues("The Node status are as follows", logrus.Fields{ "Node": node.Name, "Ready": isReady}) diff --git a/pkg/types/types.go b/pkg/types/types.go index d1a7ed207..dfe9fcfe7 100644 --- a/pkg/types/types.go +++ b/pkg/types/types.go @@ -1,6 +1,7 @@ package types import ( + "github.com/litmuschaos/litmus-go/pkg/cerrors" "os" "strconv" "strings" @@ -29,11 +30,19 @@ const ( AbortVerdict string = "Abort" ) +type ExperimentPhase string + +const ( + PreChaosPhase ExperimentPhase = "PreChaos" + PostChaosPhase ExperimentPhase = "PostChaos" + ChaosInjectPhase ExperimentPhase = "ChaosInject" +) + // ResultDetails is for collecting all the chaos-result-related details type ResultDetails struct { Name string Verdict v1alpha1.ResultVerdict - FailStep string + FailureOutput *v1alpha1.FailureOutput Phase v1alpha1.ResultPhase ResultUID clientTypes.UID ProbeDetails []ProbeDetails @@ -93,6 +102,7 @@ type ChaosDetails struct { Resources corev1.ResourceRequirements ImagePullSecrets []corev1.LocalObjectReference Labels map[string]string + Phase ExperimentPhase } type ParentResource struct { @@ -161,13 +171,13 @@ func InitialiseChaosVariables(chaosDetails *ChaosDetails) { chaosDetails.ProbeImagePullPolicy = Getenv("LIB_IMAGE_PULL_POLICY", "Always") chaosDetails.ParentsResources = []ParentResource{} chaosDetails.Targets = []v1alpha1.TargetDetails{} + chaosDetails.Phase = PreChaosPhase } //SetResultAttributes initialise all the chaos result ENV func SetResultAttributes(resultDetails *ResultDetails, chaosDetails ChaosDetails) { resultDetails.Verdict = "Awaited" resultDetails.Phase = "Running" - resultDetails.FailStep = "N/A" resultDetails.PassedProbeCount = 0 if chaosDetails.EngineName != "" { resultDetails.Name = chaosDetails.EngineName + "-" + chaosDetails.ExperimentName @@ -182,10 +192,15 @@ func SetResultAttributes(resultDetails *ResultDetails, chaosDetails ChaosDetails } //SetResultAfterCompletion set all the chaos result ENV in the EOT -func SetResultAfterCompletion(resultDetails *ResultDetails, verdict v1alpha1.ResultVerdict, phase v1alpha1.ResultPhase, failStep string) { +func SetResultAfterCompletion(resultDetails *ResultDetails, verdict v1alpha1.ResultVerdict, phase v1alpha1.ResultPhase, failStep string, errorCode cerrors.ErrorType) { resultDetails.Verdict = verdict resultDetails.Phase = phase - resultDetails.FailStep = failStep + if errorCode != cerrors.ErrorTypeHelperPodFailed { + resultDetails.FailureOutput = &v1alpha1.FailureOutput{ + FailedStep: failStep, + ErrorCode: string(errorCode), + } + } } //SetEngineEventAttributes initialise attributes for event generation in chaos engine diff --git a/pkg/utils/common/common.go b/pkg/utils/common/common.go index 427d7bf8c..d6e776439 100644 --- a/pkg/utils/common/common.go +++ b/pkg/utils/common/common.go @@ -1,9 +1,13 @@ package common import ( + "bytes" "fmt" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/palantir/stacktrace" "math/rand" "os" + "os/exec" "os/signal" "reflect" "strconv" @@ -17,7 +21,6 @@ import ( "github.com/litmuschaos/litmus-go/pkg/math" "github.com/litmuschaos/litmus-go/pkg/result" "github.com/litmuschaos/litmus-go/pkg/types" - "github.com/pkg/errors" apiv1 "k8s.io/api/core/v1" ) @@ -43,7 +46,7 @@ func RandomInterval(interval string) error { lowerBound, _ = strconv.Atoi(intervals[0]) upperBound, _ = strconv.Atoi(intervals[1]) default: - return errors.Errorf("unable to parse 
CHAOS_INTERVAL, provide in valid format") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: "could not parse CHAOS_INTERVAL env, invalid format"} } rand.Seed(time.Now().UnixNano()) waitTime := lowerBound + rand.Intn(upperBound-lowerBound) @@ -84,7 +87,7 @@ func AbortWatcherWithoutExit(expname string, clients clients.ClientSets, resultD log.Info("[Chaos]: Chaos Experiment Abortion started because of terminated signal received") // updating the chaosresult after stopped failStep := "Chaos injection stopped!" - types.SetResultAfterCompletion(resultDetails, "Stopped", "Stopped", failStep) + types.SetResultAfterCompletion(resultDetails, "Stopped", "Stopped", failStep, cerrors.ErrorTypeExperimentAborted) if err := result.ChaosResult(chaosDetails, clients, resultDetails, "EOT"); err != nil { log.Errorf("[ABORT]: Failed to update result, err: %v", err) } @@ -159,11 +162,14 @@ func getEnvSource(apiVersion string, fieldPath string) apiv1.EnvVarSource { } // HelperFailedError return the helper pod error message -func HelperFailedError(err error) error { +func HelperFailedError(err error, appLabel, namespace string, podLevel bool) error { if err != nil { - return errors.Errorf("helper pod failed, err: %v", err) + return stacktrace.Propagate(err, "helper pod failed") } - return errors.Errorf("helper pod failed") + if podLevel { + return cerrors.Error{ErrorCode: cerrors.ErrorTypeHelperPodFailed, Target: fmt.Sprintf("{podLabel: %s, namespace: %s}", appLabel, namespace), Reason: "helper pod failed"} + } + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{podLabel: %s, namespace: %s}", appLabel, namespace), Reason: "helper pod failed"} } // GetStatusMessage returns the event message @@ -236,3 +242,18 @@ func Contains(val interface{}, slice interface{}) bool { } return false } + +func RunBashCommand(command string, failMsg string, source string) error { + cmd := exec.Command("/bin/bash", "-c", command) + return RunCLICommands(cmd, source, "", failMsg, cerrors.ErrorTypeHelper) +} + +func RunCLICommands(cmd *exec.Cmd, source, target, failMsg string, errorCode cerrors.ErrorType) error { + var out, stdErr bytes.Buffer + cmd.Stdout = &out + cmd.Stderr = &stdErr + if err = cmd.Run(); err != nil { + return cerrors.Error{ErrorCode: errorCode, Target: target, Source: source, Reason: fmt.Sprintf("%s: %s", failMsg, stdErr.String())} + } + return nil +} diff --git a/pkg/utils/common/nodes.go b/pkg/utils/common/nodes.go index 8d89ef935..77951d0f5 100644 --- a/pkg/utils/common/nodes.go +++ b/pkg/utils/common/nodes.go @@ -2,6 +2,9 @@ package common import ( "context" + "fmt" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/palantir/stacktrace" "math/rand" "strconv" "strings" @@ -10,16 +13,13 @@ import ( "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/math" - "github.com/litmuschaos/litmus-go/pkg/status" - "github.com/litmuschaos/litmus-go/pkg/utils/retry" - "github.com/pkg/errors" apiv1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) var err error -//GetNodeList check for the availibilty of the application node for the chaos execution +//GetNodeList check for the availability of the application node for the chaos execution // if the application node is not defined it will derive the random target node list using node affected percentage func GetNodeList(nodeNames, nodeLabel string, nodeAffPerc int, clients clients.ClientSets) ([]string, error) { @@ -33,18 +33,14 @@ 
func GetNodeList(nodeNames, nodeLabel string, nodeAffPerc int, clients clients.C switch nodeLabel { case "": - nodes, err = clients.KubeClient.CoreV1().Nodes().List(context.Background(), v1.ListOptions{}) + nodes, err = getAllNodes(clients) if err != nil { - return nil, errors.Errorf("Failed to find the nodes, err: %v", err) - } else if len(nodes.Items) == 0 { - return nil, errors.Errorf("Failed to find the nodes") + return nil, stacktrace.Propagate(err, "could not get all nodes") } default: - nodes, err = clients.KubeClient.CoreV1().Nodes().List(context.Background(), v1.ListOptions{LabelSelector: nodeLabel}) + nodes, err = getNodesByLabels(nodeLabel, clients) if err != nil { - return nil, errors.Errorf("Failed to find the nodes with matching label, err: %v", err) - } else if len(nodes.Items) == 0 { - return nil, errors.Errorf("Failed to find the nodes with matching label") + return nil, stacktrace.Propagate(err, "could not get nodes by labels") } } @@ -71,20 +67,18 @@ func GetNodeName(namespace, labels, nodeLabel string, clients clients.ClientSets case "": podList, err := clients.KubeClient.CoreV1().Pods(namespace).List(context.Background(), v1.ListOptions{LabelSelector: labels}) if err != nil { - return "", errors.Wrapf(err, "Failed to find the application pods with matching labels in %v namespace, err: %v", namespace, err) + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Target: fmt.Sprintf("{podLabel: %s, namespace: %s}", labels, namespace), Reason: err.Error()} } else if len(podList.Items) == 0 { - return "", errors.Errorf("Failed to find the application pods with matching labels in %v namespace", namespace) + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Target: fmt.Sprintf("{podLabel: %s, namespace: %s}", labels, namespace), Reason: "no pod found with matching labels"} } rand.Seed(time.Now().Unix()) randomIndex := rand.Intn(len(podList.Items)) return podList.Items[randomIndex].Spec.NodeName, nil default: - nodeList, err := clients.KubeClient.CoreV1().Nodes().List(context.Background(), v1.ListOptions{LabelSelector: nodeLabel}) + nodeList, err := getNodesByLabels(nodeLabel, clients) if err != nil { - return "", errors.Wrapf(err, "Failed to find the target nodes with matching labels in %v namespace, err: %v", namespace, err) - } else if len(nodeList.Items) == 0 { - return "", errors.Wrapf(err, "Failed to find the target nodes with matching labels in %v namespace", namespace) + return "", stacktrace.Propagate(err, "could not get nodes by labels") } rand.Seed(time.Now().Unix()) randomIndex := rand.Intn(len(nodeList.Items)) @@ -92,62 +86,22 @@ func GetNodeName(namespace, labels, nodeLabel string, clients clients.ClientSets } } -// PreChaosNodeStatusCheck fetches all the nodes in the cluster and checks their status, and fetches the total active nodes in the cluster, prior to the chaos experiment -func PreChaosNodeStatusCheck(timeout, delay int, clients clients.ClientSets) (int, error) { +func getAllNodes(clients clients.ClientSets) (*apiv1.NodeList, error) { nodeList, err := clients.KubeClient.CoreV1().Nodes().List(context.Background(), v1.ListOptions{}) if err != nil { - return 0, errors.Errorf("fail to get the nodes, err: %v", err) + return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: fmt.Sprintf("failed to list all nodes: %s", err.Error())} + } else if len(nodeList.Items) == 0 { + return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "no node found!"} } - for _, node := range 
nodeList.Items { - if err = status.CheckNodeStatus(node.Name, timeout, delay, clients); err != nil { - log.Infof("[Info]: The cluster is unhealthy this might not work, due to %v", err) - } - } - activeNodeCount, err := getActiveNodeCount(clients) - if err != nil { - return 0, errors.Errorf("fail to get the total active node count pre chaos, err: %v", err) - } - - return activeNodeCount, nil -} - -// PostChaosActiveNodeCountCheck checks the number of active nodes post chaos and validates the number of healthy node count post chaos -func PostChaosActiveNodeCountCheck(activeNodeCount, timeout, delay int, clients clients.ClientSets) error { - err := retry. - Times(uint(timeout / delay)). - Wait(time.Duration(delay) * time.Second). - Try(func(attempt uint) error { - - activeNodes, err := getActiveNodeCount(clients) - if err != nil { - return errors.Errorf("fail to get the total active nodes, err: %v", err) - } - if activeNodeCount != activeNodes { - return errors.Errorf("fail to get equal active node post chaos") - } - return nil - }) - return err + return nodeList, nil } -// getActiveNodeCount fetches the target node and total node count from the cluster -func getActiveNodeCount(clients clients.ClientSets) (int, error) { - nodeList, err := clients.KubeClient.CoreV1().Nodes().List(context.Background(), v1.ListOptions{}) +func getNodesByLabels(nodeLabel string, clients clients.ClientSets) (*apiv1.NodeList, error) { + nodeList, err := clients.KubeClient.CoreV1().Nodes().List(context.Background(), v1.ListOptions{LabelSelector: nodeLabel}) if err != nil { - return 0, errors.Errorf("fail to get the nodes, err: %v", err) + return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Target: fmt.Sprintf("{nodeLabel: %s}", nodeLabel), Reason: err.Error()} + } else if len(nodeList.Items) == 0 { + return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Target: fmt.Sprintf("{nodeLabel: %s}", nodeLabel), Reason: "no node found with matching labels"} } - - nodeCount := 0 - for _, node := range nodeList.Items { - - conditions := node.Status.Conditions - for _, condition := range conditions { - if condition.Type == apiv1.NodeReady && condition.Status == apiv1.ConditionTrue { - nodeCount++ - } - } - } - log.Infof("[Info]: Total number active nodes are: %v", nodeCount) - - return nodeCount, nil + return nodeList, nil } diff --git a/pkg/utils/common/pid.go b/pkg/utils/common/pid.go index 03361d581..8ff32eec1 100644 --- a/pkg/utils/common/pid.go +++ b/pkg/utils/common/pid.go @@ -1,14 +1,16 @@ package common import ( + "bytes" "encoding/json" "fmt" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/palantir/stacktrace" "os/exec" "strconv" "strings" "github.com/litmuschaos/litmus-go/pkg/log" - "github.com/pkg/errors" ) // CrictlInspectResponse JSON representation of crictl inspect command output @@ -50,34 +52,33 @@ type StateDetails struct { PID int `json:"pid"` } -func getDockerPID(containerID, socketPath string) (int, error) { - host := "unix://" + socketPath - // deriving pid from the inspect out of target container - out, err := exec.Command("sudo", "docker", "--host", host, "inspect", containerID).CombinedOutput() +func getDockerPID(containerID, socketPath, source string) (int, error) { + cmd := exec.Command("sudo", "docker", "--host", fmt.Sprintf("unix://%s", socketPath), "inspect", containerID) + out, err := inspect(cmd, containerID, source) if err != nil { - log.Error(fmt.Sprintf("[docker]: Failed to run docker inspect: %s", string(out))) - return 0, err + return 0, 
stacktrace.Propagate(err, "could not inspect container id") } + // in docker, pid is present inside state.pid attribute of inspect output var resp []DockerInspectResponse if err := json.Unmarshal(out, &resp); err != nil { - return 0, err + return 0, cerrors.Error{ErrorCode: cerrors.ErrorTypeContainerRuntime, Source: source, Target: fmt.Sprintf("containerID: %s", containerID), Reason: fmt.Sprintf("failed to parse pid: %s", err.Error())} } pid := resp[0].State.PID return pid, nil } -func getContainerdSandboxPID(containerID, socketPath string) (int, error) { +func getContainerdSandboxPID(containerID, socketPath, source string) (int, error) { var pid int - endpoint := "unix://" + socketPath - out, err := exec.Command("sudo", "crictl", "-i", endpoint, "-r", endpoint, "inspect", containerID).CombinedOutput() + cmd := exec.Command("sudo", "crictl", "-i", fmt.Sprintf("unix://%s", socketPath), "-r", fmt.Sprintf("unix://%s", socketPath), "inspect", containerID) + out, err := inspect(cmd, containerID, source) if err != nil { - log.Error(fmt.Sprintf("[cri]: Failed to run crictl: %s", string(out))) - return 0, err + return 0, stacktrace.Propagate(err, "could not inspect container id") } + var resp CrictlInspectResponse if err := json.Unmarshal(out, &resp); err != nil { - return 0, err + return 0, cerrors.Error{ErrorCode: cerrors.ErrorTypeContainerRuntime, Source: source, Target: fmt.Sprintf("containerID: %s", containerID), Reason: fmt.Sprintf("failed to parse pid: %s", err.Error())} } for _, namespace := range resp.Info.RuntimeSpec.Linux.Namespaces { if namespace.Type == "network" { @@ -88,42 +89,42 @@ func getContainerdSandboxPID(containerID, socketPath string) (int, error) { return pid, nil } -func getContainerdPID(containerID, socketPath string) (int, error) { +func getContainerdPID(containerID, socketPath, source string) (int, error) { var pid int - endpoint := "unix://" + socketPath - out, err := exec.Command("sudo", "crictl", "-i", endpoint, "-r", endpoint, "inspect", containerID).CombinedOutput() + cmd := exec.Command("sudo", "crictl", "-i", fmt.Sprintf("unix://%s", socketPath), "-r", fmt.Sprintf("unix://%s", socketPath), "inspect", containerID) + out, err := inspect(cmd, containerID, source) if err != nil { - log.Error(fmt.Sprintf("[cri]: Failed to run crictl: %s", string(out))) - return 0, err + return 0, stacktrace.Propagate(err, "could not inspect container id") } + var resp CrictlInspectResponse if err := json.Unmarshal(out, &resp); err != nil { - return 0, err + return 0, cerrors.Error{ErrorCode: cerrors.ErrorTypeContainerRuntime, Source: source, Target: fmt.Sprintf("{containerID: %s}", containerID), Reason: fmt.Sprintf("failed to parse pid: %s", err.Error())} } pid = resp.Info.PID if pid == 0 { - return 0, errors.Errorf("[cri]: No running target container found, pid: %d", pid) + return 0, cerrors.Error{ErrorCode: cerrors.ErrorTypeContainerRuntime, Source: source, Target: fmt.Sprintf("{containerID: %s}", containerID), Reason: fmt.Sprintf("no running target container found")} } return pid, nil } -func getCRIOPID(containerID, socketPath string) (int, error) { +func getCRIOPID(containerID, socketPath, source string) (int, error) { var pid int - endpoint := "unix://" + socketPath - out, err := exec.Command("sudo", "crictl", "-i", endpoint, "-r", endpoint, "inspect", containerID).CombinedOutput() + cmd := exec.Command("sudo", "crictl", "-i", fmt.Sprintf("unix://%s", socketPath), "-r", fmt.Sprintf("unix://%s", socketPath), "inspect", containerID) + out, err := inspect(cmd, containerID, source) 
if err != nil { - log.Error(fmt.Sprintf("[cri]: Failed to run crictl: %s", string(out))) - return 0, err + return 0, stacktrace.Propagate(err, "could not inspect container id") } + var info InfoDetails if err := json.Unmarshal(out, &info); err != nil { - return 0, err + return 0, cerrors.Error{ErrorCode: cerrors.ErrorTypeContainerRuntime, Source: source, Target: fmt.Sprintf("containerID: %s", containerID), Reason: fmt.Sprintf("failed to parse pid: %s", err.Error())} } pid = info.PID if pid == 0 { var resp CrictlInspectResponse if err := json.Unmarshal(out, &resp); err != nil { - return 0, err + return 0, cerrors.Error{ErrorCode: cerrors.ErrorTypeContainerRuntime, Source: source, Target: fmt.Sprintf("containerID: %s", containerID), Reason: fmt.Sprintf("failed to parse pid: %s", err.Error())} } pid = resp.Info.PID } @@ -131,34 +132,44 @@ func getCRIOPID(containerID, socketPath string) (int, error) { } //GetPauseAndSandboxPID extract out the PID of the target container -func GetPauseAndSandboxPID(runtime, containerID, socketPath string) (int, error) { +func GetPauseAndSandboxPID(runtime, containerID, socketPath, source string) (int, error) { var pid int switch runtime { case "docker": - pid, err = getDockerPID(containerID, socketPath) + pid, err = getDockerPID(containerID, socketPath, source) case "containerd": - pid, err = getContainerdSandboxPID(containerID, socketPath) + pid, err = getContainerdSandboxPID(containerID, socketPath, source) case "crio": - pid, err = getCRIOPID(containerID, socketPath) + pid, err = getCRIOPID(containerID, socketPath, source) default: - return 0, errors.Errorf("%v container runtime not suported", runtime) + return 0, cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: source, Reason: fmt.Sprintf("unsupported container runtime: %s", runtime)} } if err != nil { return 0, err } if pid == 0 { - return 0, errors.Errorf("[cri]: No running target container found, pid: %d", pid) + return 0, cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: source, Target: fmt.Sprintf("containerID: %s", containerID), Reason: fmt.Sprintf("no running target container found")} } log.Info(fmt.Sprintf("[Info]: Container ID=%s has process PID=%d", containerID, pid)) return pid, nil } -func GetPID(runtime, containerID, socketPath string) (int, error) { +func GetPID(runtime, containerID, socketPath, source string) (int, error) { if runtime == "containerd" { - return getContainerdPID(containerID, socketPath) + return getContainerdPID(containerID, socketPath, source) + } + return GetPauseAndSandboxPID(runtime, containerID, socketPath, source) +} + +func inspect(cmd *exec.Cmd, containerID, source string) ([]byte, error) { + var out, stdErr bytes.Buffer + cmd.Stdout = &out + cmd.Stderr = &stdErr + if err := cmd.Run(); err != nil { + return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeContainerRuntime, Source: source, Target: fmt.Sprintf("{containerID: %s}", containerID), Reason: fmt.Sprintf("failed to get container pid: %s", stdErr.String())} } - return GetPauseAndSandboxPID(runtime, containerID, socketPath) + return out.Bytes(), nil } diff --git a/pkg/utils/common/pods.go b/pkg/utils/common/pods.go index d2030d12e..7e7d3df87 100644 --- a/pkg/utils/common/pods.go +++ b/pkg/utils/common/pods.go @@ -3,6 +3,8 @@ package common import ( "context" "fmt" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/palantir/stacktrace" "math/rand" "os" "os/exec" @@ -17,7 +19,6 @@ import ( "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/retry" 
"github.com/litmuschaos/litmus-go/pkg/workloads" - "github.com/pkg/errors" "github.com/sirupsen/logrus" core_v1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -28,53 +29,40 @@ import ( func DeletePod(podName, podLabel, namespace string, timeout, delay int, clients clients.ClientSets) error { if err := clients.KubeClient.CoreV1().Pods(namespace).Delete(context.Background(), podName, v1.DeleteOptions{}); err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{podName: %s, namespace: %s}", podName, namespace), Reason: fmt.Sprintf("failed to delete helper pod: %s", err.Error())} } - // waiting for the termination of the pod - return retry. - Times(uint(timeout / delay)). - Wait(time.Duration(delay) * time.Second). - Try(func(attempt uint) error { - podSpec, err := clients.KubeClient.CoreV1().Pods(namespace).List(context.Background(), v1.ListOptions{LabelSelector: podLabel}) - if err != nil { - return errors.Errorf("Unable to delete the pod, err: %v", err) - } else if len(podSpec.Items) != 0 { - return errors.Errorf("Unable to delete the pod") - } - return nil - }) + return waitForPodTermination(podLabel, namespace, timeout, delay, clients) } //DeleteAllPod deletes all the pods with matching labels and wait until all the pods got terminated func DeleteAllPod(podLabel, namespace string, timeout, delay int, clients clients.ClientSets) error { if err := clients.KubeClient.CoreV1().Pods(namespace).DeleteCollection(context.Background(), v1.DeleteOptions{}, v1.ListOptions{LabelSelector: podLabel}); err != nil { - return err + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{podLabel: %s, namespace: %s}", podLabel, namespace), Reason: fmt.Sprintf("failed to delete helper pod(s): %s", err.Error())} } - // waiting for the termination of the pod + return waitForPodTermination(podLabel, namespace, timeout, delay, clients) +} + +func waitForPodTermination(podLabel, namespace string, timeout, delay int, clients clients.ClientSets) error { return retry. Times(uint(timeout / delay)). Wait(time.Duration(delay) * time.Second). 
Try(func(attempt uint) error { podSpec, err := clients.KubeClient.CoreV1().Pods(namespace).List(context.Background(), v1.ListOptions{LabelSelector: podLabel}) if err != nil { - return errors.Errorf("Unable to delete the pods, err: %v", err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{podLabel: %s, namespace: %s}", podLabel, namespace), Reason: fmt.Sprintf("failed to list helper pod(s): %s", err.Error())} } else if len(podSpec.Items) != 0 { - return errors.Errorf("Unable to delete the pods") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{podLabel: %s, namespace: %s}", podLabel, namespace), Reason: "helper pod(s) are not deleted within timeout"} } return nil }) } // getChaosPodResourceRequirements will return the resource requirements on chaos pod -func getChaosPodResourceRequirements(podName, containerName, namespace string, clients clients.ClientSets) (core_v1.ResourceRequirements, error) { +func getChaosPodResourceRequirements(pod *core_v1.Pod, containerName string) (core_v1.ResourceRequirements, error) { - pod, err := clients.KubeClient.CoreV1().Pods(namespace).Get(context.Background(), podName, v1.GetOptions{}) - if err != nil { - return core_v1.ResourceRequirements{}, err - } for _, container := range pod.Spec.Containers { // The name of chaos container is always same as job name // - @@ -82,14 +70,14 @@ func getChaosPodResourceRequirements(podName, containerName, namespace string, c return container.Resources, nil } } - return core_v1.ResourceRequirements{}, errors.Errorf("No container found with %v name in target pod", containerName) + return core_v1.ResourceRequirements{}, cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{podName: %s, containerName: %s, namespace: %s}", pod.Name, containerName, pod.Namespace), Reason: "no container found with matching name in target pod"} } // SetHelperData derive the data from experiment pod and sets into experimentDetails struct // which can be used to create helper pod func SetHelperData(chaosDetails *types.ChaosDetails, setHelperData string, clients clients.ClientSets) error { var pod *core_v1.Pod - pod, err = clients.KubeClient.CoreV1().Pods(chaosDetails.ChaosNamespace).Get(context.Background(), chaosDetails.ChaosPodName, v1.GetOptions{}) + pod, err := GetExperimentPod(chaosDetails.ChaosPodName, chaosDetails.ChaosNamespace, clients) if err != nil { return err } @@ -113,9 +101,9 @@ func SetHelperData(chaosDetails *types.ChaosDetails, setHelperData string, clien chaosDetails.ImagePullSecrets = pod.Spec.ImagePullSecrets // Get Resource Requirements - chaosDetails.Resources, err = getChaosPodResourceRequirements(chaosDetails.ChaosPodName, chaosDetails.ExperimentName, chaosDetails.ChaosNamespace, clients) + chaosDetails.Resources, err = getChaosPodResourceRequirements(pod, chaosDetails.ExperimentName) if err != nil { - return errors.Errorf("unable to get resource requirements, err: %v", err) + return stacktrace.Propagate(err, "could not inherit resource requirements") } return nil } @@ -136,12 +124,12 @@ func VerifyExistanceOfPods(namespace, pods string, clients clients.ClientSets) ( podList := strings.Split(strings.TrimSpace(pods), ",") for index := range podList { - isPodsAvailable, err := CheckForAvailibiltyOfPod(namespace, podList[index], clients) + isPodsAvailable, err := CheckForAvailabilityOfPod(namespace, podList[index], clients) if err != nil { return false, err } if !isPodsAvailable { - return isPodsAvailable, errors.Errorf("%v pod is not available in %v 
namespace", podList[index], namespace) + return isPodsAvailable, cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Target: fmt.Sprintf("{podName: %s, namespace: %s}", podList[index], namespace), Reason: "pod doesn't exist set by TARGET_PODS ENV"} } } return true, nil @@ -160,7 +148,7 @@ func GetPodList(targetPods string, podAffPerc int, clients clients.ClientSets, c isPodsAvailable, err := VerifyExistanceOfPods(namespace, targetPods, clients) if err != nil { - return core_v1.PodList{}, err + return core_v1.PodList{}, stacktrace.Propagate(err, "could not verify existence of TARGET_PODS") } // getting the pod, if the target pods is defined @@ -169,21 +157,21 @@ func GetPodList(targetPods string, podAffPerc int, clients clients.ClientSets, c case true: podList, err := GetTargetPodsWhenTargetPodsENVSet(targetPods, namespace, clients, chaosDetails) if err != nil { - return core_v1.PodList{}, err + return core_v1.PodList{}, stacktrace.Propagate(err, "could not get target pods when TARGET_PODS env set") } finalPods.Items = append(finalPods.Items, podList.Items...) default: podList, err := GetTargetPodsWhenTargetPodsENVNotSet(podAffPerc, clients, chaosDetails) if err != nil { - return core_v1.PodList{}, err + return core_v1.PodList{}, stacktrace.Propagate(err, "could not get target pods when TARGET_PODS env not set") } finalPods.Items = append(finalPods.Items, podList.Items...) } return finalPods, nil } -// CheckForAvailibiltyOfPod check the availibility of the specified pod -func CheckForAvailibiltyOfPod(namespace, name string, clients clients.ClientSets) (bool, error) { +// CheckForAvailabilityOfPod check the availability of the specified pod +func CheckForAvailabilityOfPod(namespace, name string, clients clients.ClientSets) (bool, error) { if name == "" { return false, nil @@ -193,7 +181,7 @@ func CheckForAvailibiltyOfPod(namespace, name string, clients clients.ClientSets if err != nil && k8serrors.IsNotFound(err) { return false, nil } else if err != nil { - return false, err + return false, cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Target: fmt.Sprintf("{podName: %s, namespace: %s}", name, namespace), Reason: err.Error()} } return true, nil } @@ -203,9 +191,9 @@ func CheckForAvailibiltyOfPod(namespace, name string, clients clients.ClientSets func FilterNonChaosPods(ns, labels string, clients clients.ClientSets, chaosDetails *types.ChaosDetails) (core_v1.PodList, error) { podList, err := clients.KubeClient.CoreV1().Pods(ns).List(context.Background(), v1.ListOptions{LabelSelector: labels}) if err != nil { - return core_v1.PodList{}, err + return core_v1.PodList{}, cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Target: fmt.Sprintf("{podLabel: %s, namespace: %s}", labels, ns), Reason: err.Error()} } else if len(podList.Items) == 0 { - return core_v1.PodList{}, errors.Wrapf(err, "Failed to find the pod with matching labels in %v namespace", ns) + return core_v1.PodList{}, cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Target: fmt.Sprintf("{podLabel: %s, namespace: %s}", labels, ns), Reason: "could not find pods with matching labels"} } nonChaosPods := core_v1.PodList{} // ignore chaos pods @@ -225,7 +213,7 @@ func GetTargetPodsWhenTargetPodsENVSet(targetPods, namespace string, clients cli for index := range targetPodsList { pod, err := clients.KubeClient.CoreV1().Pods(namespace).Get(context.Background(), strings.TrimSpace(targetPodsList[index]), v1.GetOptions{}) if err != nil { - return core_v1.PodList{}, errors.Wrapf(err, "Failed to get %v pod in %v 
namespace", targetPodsList[index], namespace) + return core_v1.PodList{}, cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Target: fmt.Sprintf("{podName: %s, namespace: %s}", targetPodsList[index], namespace), Reason: err.Error()} } realPods.Items = append(realPods.Items, *pod) } @@ -274,7 +262,7 @@ func GetTargetPodsWhenTargetPodsENVNotSet(podAffPerc int, clients clients.Client // select random pod from ns pods, err := FilterNonChaosPods(chaosDetails.AppDetail[0].Namespace, "", clients, chaosDetails) if err != nil { - return finalPods, err + return finalPods, stacktrace.Propagate(err, "could not filter non chaos pods") } return filterPodsByPercentage(pods, podAffPerc), nil } @@ -285,7 +273,7 @@ func GetTargetPodsWhenTargetPodsENVNotSet(podAffPerc int, clients clients.Client for _, name := range target.Names { pod, err := clients.KubeClient.CoreV1().Pods(target.Namespace).Get(context.Background(), name, v1.GetOptions{}) if err != nil { - return finalPods, err + return finalPods, cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Target: fmt.Sprintf("{podName: %s, namespace: %s}", name, target.Namespace), Reason: err.Error()} } finalPods.Items = append(finalPods.Items, *pod) } @@ -294,14 +282,14 @@ func GetTargetPodsWhenTargetPodsENVNotSet(podAffPerc int, clients clients.Client if target.Names != nil { pods, err := workloads.GetPodsFromWorkloads(target, clients) if err != nil { - return finalPods, err + return finalPods, stacktrace.Propagate(err, "could not get pods from workloads") } finalPods.Items = append(finalPods.Items, pods.Items...) } else { for _, label := range target.Labels { pods, err := clients.KubeClient.CoreV1().Pods(target.Namespace).List(context.Background(), v1.ListOptions{LabelSelector: label}) if err != nil { - return finalPods, err + return finalPods, cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Target: fmt.Sprintf("{podLabel: %s, namespace: %s}", label, target.Namespace), Reason: err.Error()} } finalPods.Items = append(finalPods.Items, pods.Items...) 
} @@ -310,7 +298,7 @@ func GetTargetPodsWhenTargetPodsENVNotSet(podAffPerc int, clients clients.Client } if len(finalPods.Items) == 0 { - return finalPods, errors.Errorf("No target pod found") + return finalPods, cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Target: GetAppDetailsForLogging(chaosDetails.AppDetail), Reason: "no target pods found"} } if podKind { @@ -360,29 +348,28 @@ func DeleteAllHelperPodBasedOnJobCleanupPolicy(podLabel string, chaosDetails *ty // GetServiceAccount derive the serviceAccountName for the helper pod func GetServiceAccount(chaosNamespace, chaosPodName string, clients clients.ClientSets) (string, error) { - pod, err := clients.KubeClient.CoreV1().Pods(chaosNamespace).Get(context.Background(), chaosPodName, v1.GetOptions{}) + pod, err := GetExperimentPod(chaosPodName, chaosNamespace, clients) if err != nil { return "", err } return pod.Spec.ServiceAccountName, nil } -//GetTargetContainer will fetch the container name from application pod -//This container will be used as target container -func GetTargetContainer(appNamespace, appName string, clients clients.ClientSets) (string, error) { - pod, err := clients.KubeClient.CoreV1().Pods(appNamespace).Get(context.Background(), appName, v1.GetOptions{}) +// GetExperimentPod fetch the experiment pod +func GetExperimentPod(name, namespace string, clients clients.ClientSets) (*core_v1.Pod, error) { + pod, err := clients.KubeClient.CoreV1().Pods(namespace).Get(context.Background(), name, v1.GetOptions{}) if err != nil { - return "", err + return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{podName: %s, namespace: %s}", name, namespace), Reason: fmt.Sprintf("failed to get experiment pod: %s", err.Error())} } - return pod.Spec.Containers[0].Name, nil + return pod, nil } //GetContainerID derive the container id of the application container -func GetContainerID(appNamespace, targetPod, targetContainer string, clients clients.ClientSets) (string, error) { +func GetContainerID(appNamespace, targetPod, targetContainer string, clients clients.ClientSets, source string) (string, error) { pod, err := clients.KubeClient.CoreV1().Pods(appNamespace).Get(context.Background(), targetPod, v1.GetOptions{}) if err != nil { - return "", err + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: source, Target: fmt.Sprintf("{podName: %s, namespace: %s}", targetPod, appNamespace), Reason: err.Error()} } var containerID string @@ -395,11 +382,14 @@ func GetContainerID(appNamespace, targetPod, targetContainer string, clients cli break } } + if containerID == "" { + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeContainerRuntime, Source: source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", targetPod, appNamespace, targetContainer), Reason: fmt.Sprintf("no container found with specified name")} + } return containerID, nil } //GetRuntimeBasedContainerID extract out the container id of the target container based on the container runtime -func GetRuntimeBasedContainerID(containerRuntime, socketPath, targetPods, appNamespace, targetContainer string, clients clients.ClientSets) (string, error) { +func GetRuntimeBasedContainerID(containerRuntime, socketPath, targetPods, appNamespace, targetContainer string, clients clients.ClientSets, source string) (string, error) { var containerID string switch containerRuntime { @@ -409,17 +399,17 @@ func GetRuntimeBasedContainerID(containerRuntime, socketPath, targetPods, appNam cmd := "sudo docker --host " + host + " ps | 
grep k8s_POD_" + targetPods + "_" + appNamespace + " | awk '{print $1}'" out, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput() if err != nil { - log.Errorf("[docker]: Failed to run docker ps command: %s", string(out)) - return "", err + log.Errorf("[docker]: Failed to run docker ps command: %s", err.Error()) + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeContainerRuntime, Source: source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", targetPods, appNamespace, targetContainer), Reason: fmt.Sprintf("failed to get container id :%s", string(out))} } containerID = strings.TrimSpace(string(out)) case "containerd", "crio": - containerID, err = GetContainerID(appNamespace, targetPods, targetContainer, clients) + containerID, err = GetContainerID(appNamespace, targetPods, targetContainer, clients, source) if err != nil { - return "", err + return "", stacktrace.Propagate(err, "could not get container id") } default: - return "", errors.Errorf("%v container runtime not suported", containerRuntime) + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: source, Reason: fmt.Sprintf("unsupported container runtime: %s", containerRuntime)} } log.Infof("Container ID: %v", containerID) @@ -427,18 +417,18 @@ func GetRuntimeBasedContainerID(containerRuntime, socketPath, targetPods, appNam } // CheckContainerStatus checks the status of the application container -func CheckContainerStatus(appNamespace, appName string, timeout, delay int, clients clients.ClientSets) error { +func CheckContainerStatus(appNamespace, appName string, timeout, delay int, clients clients.ClientSets, source string) error { return retry. Times(uint(timeout / delay)). Wait(time.Duration(delay) * time.Second). Try(func(attempt uint) error { pod, err := clients.KubeClient.CoreV1().Pods(appNamespace).Get(context.Background(), appName, v1.GetOptions{}) if err != nil { - return errors.Errorf("unable to find the pod with name %v, err: %v", appName, err) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: source, Target: fmt.Sprintf("{podName: %s, namespace: %s}", appName, appNamespace), Reason: err.Error()} } for _, container := range pod.Status.ContainerStatuses { if !container.Ready { - return errors.Errorf("containers are not yet in running state") + return cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", appName, appNamespace, container.Name), Reason: "target container is not in running state"} } log.InfoWithValues("The running status of container are as follows", logrus.Fields{ "container": container.Name, "Pod": pod.Name, "Status": pod.Status.Phase}) @@ -454,9 +444,9 @@ func GetPodListFromSpecifiedNodes(podAffPerc int, nodeLabel string, clients clie // identify node list from the provided node label nodes, err = clients.KubeClient.CoreV1().Nodes().List(context.Background(), v1.ListOptions{LabelSelector: nodeLabel}) if err != nil { - return core_v1.PodList{}, errors.Errorf("Failed to find the nodes with matching label, err: %v", err) + return core_v1.PodList{}, cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Target: fmt.Sprintf("{nodeLabel: %s}", nodeLabel), Reason: err.Error()} } else if len(nodes.Items) == 0 { - return core_v1.PodList{}, errors.Errorf("Failed to find the nodes with matching label") + return core_v1.PodList{}, cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Target: fmt.Sprintf("{nodeLabel: %s}", nodeLabel), Reason: "no nodes found with matching 
labels"} } nodeNames := []string{} for _, node := range nodes.Items { @@ -489,7 +479,7 @@ func getTargetPodsWhenNodeFilterSet(podAffPerc int, pods core_v1.PodList, nodes } if len(nodeFilteredPods.Items) == 0 { - return nodeFilteredPods, errors.Errorf("No pod found with desired attributes on specified node(s)") + return nodeFilteredPods, cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Target: fmt.Sprintf("{nodes: %v}", nodes), Reason: "no pod found on specified node(s)"} } return filterPodsByPercentage(nodeFilteredPods, podAffPerc), nil @@ -504,7 +494,7 @@ func GetTargetPods(nodeLabel, targetPods, podsAffectedPerc string, clients clien if nodeLabel != "" && targetPods == "" { pods, err = GetPodListFromSpecifiedNodes(podAffectedPerc, nodeLabel, clients, chaosDetails) if err != nil { - return core_v1.PodList{}, err + return core_v1.PodList{}, stacktrace.Propagate(err, "could not list pods from specified nodes") } } else { if targetPods != "" && nodeLabel != "" { @@ -581,17 +571,17 @@ type target struct { TargetContainer string } -func ParseTargets() (*TargetsDetails, error) { +func ParseTargets(source string) (*TargetsDetails, error) { var targets TargetsDetails targetEnv := os.Getenv("TARGETS") if targetEnv == "" { - return nil, fmt.Errorf("no target found, provide atleast one target") + return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: source, Reason: "no target found, provide atleast one target"} } for _, t := range strings.Split(targetEnv, ";") { targetList := strings.Split(t, ":") if len(targetList) != 3 { - return nil, fmt.Errorf("unsupported target: '%v', provide target in '::", targetList) + return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: source, Reason: fmt.Sprintf("unsupported target format: '%v'", targetList)} } targets.Target = append(targets.Target, target{ Name: targetList[0], @@ -601,3 +591,18 @@ func ParseTargets() (*TargetsDetails, error) { } return &targets, nil } + +func GetAppDetailsForLogging(appDetails []types.AppDetails) string { + var result []string + for _, k := range appDetails { + if k.Labels != nil { + result = append(result, fmt.Sprintf("{namespace: %s, kind: %s, labels: %s}", k.Namespace, k.Kind, k.Labels)) + continue + } + result = append(result, fmt.Sprintf("{namespace: %s, kind: %s, names: %s}", k.Namespace, k.Kind, k.Names)) + } + if len(result) != 0 { + return fmt.Sprintf("[%v]", strings.Join(result, ",")) + } + return "" +} diff --git a/pkg/utils/exec/exec.go b/pkg/utils/exec/exec.go index ff02b37b9..4bb521421 100644 --- a/pkg/utils/exec/exec.go +++ b/pkg/utils/exec/exec.go @@ -4,11 +4,10 @@ import ( "bytes" "context" "fmt" - "os" "strings" + "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/clients" - "github.com/pkg/errors" apiv1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -28,7 +27,7 @@ func Exec(commandDetails *PodDetails, clients clients.ClientSets, command []stri pod, err := clients.KubeClient.CoreV1().Pods(commandDetails.Namespace).Get(context.Background(), commandDetails.PodName, v1.GetOptions{}) if err != nil { - return "", errors.Errorf("unable to get %v pod in %v namespace, err: %v", commandDetails.PodName, commandDetails.Namespace, err) + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("unable to get %v pod in %v namespace, err: %v", commandDetails.PodName, commandDetails.Namespace, err)} } if err := checkPodStatus(pod, commandDetails.ContainerName); err != nil { return 
"", err @@ -41,7 +40,7 @@ func Exec(commandDetails *PodDetails, clients clients.ClientSets, command []stri SubResource("exec") scheme := runtime.NewScheme() if err := apiv1.AddToScheme(scheme); err != nil { - return "", fmt.Errorf("error adding to scheme: %v", err) + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("error adding to scheme: %v", err)} } // NewParameterCodec creates a ParameterCodec capable of transforming url values into versioned objects and back. @@ -60,30 +59,30 @@ func Exec(commandDetails *PodDetails, clients clients.ClientSets, command []stri // multiplexed bidirectional streams. exec, err := remotecommand.NewSPDYExecutor(clients.KubeConfig, "POST", req.URL()) if err != nil { - return "", fmt.Errorf("error while creating Executor: %v", err) + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("error while creating Executor: %v", err)} } // storing the output inside the output buffer for future use - var out bytes.Buffer - stdout := &out - stderr := os.Stderr + var stdout, stderr bytes.Buffer // Stream will initiate the transport of the standard shell streams and return an error if a problem occurs. - err = exec.Stream(remotecommand.StreamOptions{ + if err = exec.Stream(remotecommand.StreamOptions{ Stdin: nil, - Stdout: stdout, - Stderr: stderr, + Stdout: &stdout, + Stderr: &stderr, Tty: false, - }) + }); err != nil { + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("failed to create a stderr and stdout stream, %s", err.Error())} + } - if err != nil { - return "", err + if strings.TrimSpace(stderr.String()) != "" { + return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: stderr.String()} } - return out.String(), nil + return stdout.String(), nil } -//SetExecCommandAttributes initialise all the pod details to run exec command +// SetExecCommandAttributes initialise all the pod details to run exec command func SetExecCommandAttributes(podDetails *PodDetails, PodName, ContainerName, Namespace string) { podDetails.ContainerName = ContainerName @@ -95,11 +94,11 @@ func SetExecCommandAttributes(podDetails *PodDetails, PodName, ContainerName, Na func checkPodStatus(pod *apiv1.Pod, containerName string) error { if strings.ToLower(string(pod.Status.Phase)) != "running" { - return errors.Errorf("%v pod is not in running state, phase: %v", pod.Name, pod.Status.Phase) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Reason: fmt.Sprintf("%v pod is not in running state, phase: %v", pod.Name, pod.Status.Phase)} } for _, container := range pod.Status.ContainerStatuses { if container.Name == containerName && !container.Ready { - return errors.Errorf("%v container of %v pod is not in ready state, phase: %v", container.Name, pod.Name, pod.Status.Phase) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Reason: fmt.Sprintf("%v container of %v pod is not in ready state, phase: %v", container.Name, pod.Name, pod.Status.Phase)} } } return nil diff --git a/pkg/vmware/vm-poweroff/environment/environment.go b/pkg/vmware/vm-poweroff/environment/environment.go index 525d7c003..355f7e215 100644 --- a/pkg/vmware/vm-poweroff/environment/environment.go +++ b/pkg/vmware/vm-poweroff/environment/environment.go @@ -2,6 +2,7 @@ package environment import ( "strconv" + "strings" clientTypes "k8s.io/apimachinery/pkg/types" @@ -9,7 +10,7 @@ import ( experimentTypes "github.com/litmuschaos/litmus-go/pkg/vmware/vm-poweroff/types" ) -//GetENV fetches all the env 
variables from the runner pod +// GetENV fetches all the env variables from the runner pod func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "vm-poweroff") experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus") @@ -17,16 +18,13 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "30")) experimentDetails.ChaosInterval, _ = strconv.Atoi(types.Getenv("CHAOS_INTERVAL", "30")) experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "")) - experimentDetails.ChaosLib = types.Getenv("LIB", "litmus") experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", "")) experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "") experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "") - experimentDetails.AuxiliaryAppInfo = types.Getenv("AUXILIARY_APPINFO", "") - experimentDetails.TargetContainer = types.Getenv("TARGET_CONTAINER", "") experimentDetails.Delay, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_DELAY", "2")) experimentDetails.Timeout, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_TIMEOUT", "180")) experimentDetails.Sequence = types.Getenv("SEQUENCE", "parallel") - experimentDetails.VMIds = types.Getenv("APP_VM_MOIDS", "") + experimentDetails.VMIds = strings.TrimSpace(types.Getenv("APP_VM_MOIDS", "")) experimentDetails.VcenterServer = types.Getenv("VCENTERSERVER", "") experimentDetails.VcenterUser = types.Getenv("VCENTERUSER", "") experimentDetails.VcenterPass = types.Getenv("VCENTERPASS", "") diff --git a/pkg/vmware/vm-poweroff/types/types.go b/pkg/vmware/vm-poweroff/types/types.go index 94c34853b..58dee5722 100644 --- a/pkg/vmware/vm-poweroff/types/types.go +++ b/pkg/vmware/vm-poweroff/types/types.go @@ -9,23 +9,20 @@ import ( // ExperimentDetails is for collecting all the experiment-related details type ExperimentDetails struct { - ExperimentName string - EngineName string - ChaosDuration int - ChaosInterval int - RampTime int - ChaosLib string - ChaosUID clientTypes.UID - InstanceID string - ChaosNamespace string - ChaosPodName string - Timeout int - Delay int - Sequence string - VMIds string - VcenterServer string - VcenterUser string - VcenterPass string - AuxiliaryAppInfo string - TargetContainer string + ExperimentName string + EngineName string + ChaosDuration int + ChaosInterval int + RampTime int + ChaosUID clientTypes.UID + InstanceID string + ChaosNamespace string + ChaosPodName string + Timeout int + Delay int + Sequence string + VMIds string + VcenterServer string + VcenterUser string + VcenterPass string } diff --git a/pkg/workloads/workloads.go b/pkg/workloads/workloads.go index a93d7996b..89e5c499e 100644 --- a/pkg/workloads/workloads.go +++ b/pkg/workloads/workloads.go @@ -3,8 +3,11 @@ package workloads import ( "context" + "fmt" + "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/types" + "github.com/palantir/stacktrace" "strings" kcorev1 "k8s.io/api/core/v1" @@ -38,23 +41,30 @@ func GetPodsFromWorkloads(target types.AppDetails, client clients.ClientSets) (k allPods, err := getAllPods(target.Namespace, client) if err != nil { - return kcorev1.PodList{}, err + return kcorev1.PodList{}, stacktrace.Propagate(err, "could not get all pods") } return getPodsFromWorkload(target, allPods, client.DynamicClient) } func getPodsFromWorkload(target types.AppDetails, 
allPods *kcorev1.PodList, dynamicClient dynamic.Interface) (kcorev1.PodList, error) { var pods kcorev1.PodList - for _, r := range allPods.Items { - ownerType, ownerName, err := GetPodOwnerTypeAndName(&r, dynamicClient) - if err != nil { - return pods, err + for _, wld := range target.Names { + found := false + for _, r := range allPods.Items { + ownerType, ownerName, err := GetPodOwnerTypeAndName(&r, dynamicClient) + if err != nil { + return pods, err + } + if ownerName == "" || ownerType == "" { + continue + } + if target.Kind == ownerType && wld == ownerName { + found = true + pods.Items = append(pods.Items, r) + } } - if ownerName == "" || ownerType == "" { - continue - } - if matchPodOwnerWithWorkloads(ownerName, ownerType, target) { - pods.Items = append(pods.Items, r) + if !found { + return pods, cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Target: fmt.Sprintf("{namespace: %s, kind: %s, name: %s}", target.Namespace, target.Kind, wld), Reason: "no pod found for specified target"} } } return pods, nil @@ -81,7 +91,7 @@ func GetPodOwnerTypeAndName(pod *kcorev1.Pod, dynamicClient dynamic.Interface) ( func getParent(name, namespace string, gvr schema.GroupVersionResource, dynamicClient dynamic.Interface) (string, string, error) { res, err := dynamicClient.Resource(gvr).Namespace(namespace).Get(context.Background(), name, v1.GetOptions{}) if err != nil { - return "", "", err + return "", "", cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Target: fmt.Sprintf("{namespace: %s, kind: %s, name: %s}", namespace, gvr.Resource, name), Reason: err.Error()} } for _, v := range res.GetOwnerReferences() { @@ -93,18 +103,10 @@ func getParent(name, namespace string, gvr schema.GroupVersionResource, dynamicC return "", "", nil } -func matchPodOwnerWithWorkloads(name, kind string, target types.AppDetails) bool { - if kind != target.Kind { - return false - } - for _, t := range target.Names { - if t == name { - return true - } - } - return false -} - func getAllPods(namespace string, client clients.ClientSets) (*kcorev1.PodList, error) { - return client.KubeClient.CoreV1().Pods(namespace).List(context.Background(), v1.ListOptions{}) + pods, err := client.KubeClient.CoreV1().Pods(namespace).List(context.Background(), v1.ListOptions{}) + if err != nil { + return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Target: fmt.Sprintf("{namespace: %s, resource: AllPods}", namespace), Reason: fmt.Sprintf("failed to get all pods :%s", err.Error())} + } + return pods, nil }
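
The hunks above replace flat errors.Errorf strings with the structured cerrors.Error type (ErrorCode, Source, Target, Reason) and add caller context via stacktrace.Propagate. A minimal sketch of the intended pattern, assuming cerrors.Error implements the error interface as its use in the hunks implies; the checkTargetPod helper and all literal values are illustrative only:

    package main

    import (
        "fmt"

        "github.com/litmuschaos/litmus-go/pkg/cerrors"
        "github.com/palantir/stacktrace"
    )

    // checkTargetPod is a hypothetical low-level check that fails with a structured error.
    func checkTargetPod(name, namespace string) error {
        return cerrors.Error{
            ErrorCode: cerrors.ErrorTypeStatusChecks,
            Target:    fmt.Sprintf("{podName: %s, namespace: %s}", name, namespace),
            Reason:    "pod is not in Running state",
        }
    }

    func main() {
        // callers add a short, user-friendly step description without losing the structured error underneath
        err := stacktrace.Propagate(checkTargetPod("nginx-0", "default"), "could not check pod status")

        // the experiment layer can recover the original code and target,
        // e.g. to fill the chaos result's FailureOutput
        if cerr, ok := stacktrace.RootCause(err).(cerrors.Error); ok {
            fmt.Println("code:", cerr.ErrorCode, "target:", cerr.Target, "reason:", cerr.Reason)
        }
    }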
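
On the chaos-result side, FailStep is replaced by a FailureOutput carrying both the failed step and the error code, and SetResultAfterCompletion now takes the code explicitly, skipping FailureOutput when the code is ErrorTypeHelperPodFailed. A sketch of a failure path under those assumptions; the recordFailure helper and the verdict/phase literals are examples, not part of the patch:

    package main

    import (
        "github.com/litmuschaos/litmus-go/pkg/cerrors"
        "github.com/litmuschaos/litmus-go/pkg/types"
    )

    // recordFailure is a hypothetical experiment-side helper that copies a structured
    // error into the chaos result before the end-of-test (EOT) update.
    func recordFailure(resultDetails *types.ResultDetails, err cerrors.Error) {
        // per the condition added above, FailureOutput stays unset when the
        // code is cerrors.ErrorTypeHelperPodFailed
        types.SetResultAfterCompletion(resultDetails, "Fail", "Completed", err.Reason, err.ErrorCode)
    }

    func main() {
        result := &types.ResultDetails{}
        recordFailure(result, cerrors.Error{
            ErrorCode: cerrors.ErrorTypeStatusChecks,
            Reason:    "pod is not in Running state",
        })
    }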
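
RunBashCommand and RunCLICommands, added to pkg/utils/common above, centralise shell-outs from the helpers so that captured stderr becomes the Reason of a structured error. A usage sketch; the commands, source names, and target string are placeholders:

    package main

    import (
        "os/exec"

        "github.com/litmuschaos/litmus-go/pkg/cerrors"
        "github.com/litmuschaos/litmus-go/pkg/utils/common"
    )

    func main() {
        // shell one-liner: failures come back as ErrorTypeHelper with stderr folded into the reason
        if err := common.RunBashCommand("md5sum /tmp/app.log", "failed to checksum log file", "disk-fill-helper"); err != nil {
            panic(err)
        }

        // arbitrary CLI invocation with an explicit target and error code
        cmd := exec.Command("sudo", "crictl", "ps")
        if err := common.RunCLICommands(cmd, "container-kill-helper", "{containerID: <id>}", "failed to list containers", cerrors.ErrorTypeContainerRuntime); err != nil {
            panic(err)
        }
    }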
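
The pid.go and pods.go helpers now thread a source argument (typically the helper pod name) through container-ID and PID resolution so runtime failures can be attributed to the right component. A sketch of how the two calls chain, assuming an initialised ClientSets; the runtime, socket path, and pod/container names are example values:

    // pidresolver is a hypothetical package, for illustration only.
    package pidresolver

    import (
        "github.com/litmuschaos/litmus-go/pkg/clients"
        "github.com/litmuschaos/litmus-go/pkg/utils/common"
    )

    // TargetPID resolves the target container's PID the way the rewritten helpers chain together.
    func TargetPID(client clients.ClientSets) (int, error) {
        source := "pod-network-loss-helper" // helper pod name, used only for error attribution
        containerID, err := common.GetRuntimeBasedContainerID(
            "containerd", "/run/containerd/containerd.sock",
            "nginx-0", "default", "nginx", client, source)
        if err != nil {
            return 0, err
        }
        // for containerd GetPID inspects the target container directly;
        // docker and crio go through GetPauseAndSandboxPID instead
        return common.GetPID("containerd", containerID, "/run/containerd/containerd.sock", source)
    }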
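
pkg/workloads now validates each workload name individually: getPodsFromWorkload fails with an ErrorTypeTargetSelection error as soon as a name in AppDetails owns no pod, rather than silently returning a shorter list. A caller-side sketch; namespace, kind, and name are example values, and the kind casing follows whatever GetPodOwnerTypeAndName reports:

    // workloadselect is a hypothetical package, for illustration only.
    package workloadselect

    import (
        "fmt"

        "github.com/litmuschaos/litmus-go/pkg/clients"
        "github.com/litmuschaos/litmus-go/pkg/types"
        "github.com/litmuschaos/litmus-go/pkg/workloads"
    )

    // PodsForWorkload selects the pods owned by one named workload; if the name
    // matches nothing, the returned error carries the workload as its Target.
    func PodsForWorkload(client clients.ClientSets) error {
        target := types.AppDetails{
            Namespace: "default", // example values
            Kind:      "deployment",
            Names:     []string{"nginx"},
        }
        pods, err := workloads.GetPodsFromWorkloads(target, client)
        if err != nil {
            return err
        }
        fmt.Printf("selected %d pod(s)\n", len(pods.Items))
        return nil
    }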
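
pkg/utils/exec now buffers both streams: stdout is returned to the caller and any stderr output is converted into a structured error instead of being written to the experiment's own stderr. A usage sketch; the pod, container, namespace, and command are placeholders:

    // execinpod is a hypothetical package, for illustration only.
    package execinpod

    import (
        "github.com/litmuschaos/litmus-go/pkg/clients"
        "github.com/litmuschaos/litmus-go/pkg/log"
        litmusexec "github.com/litmuschaos/litmus-go/pkg/utils/exec"
    )

    // ChecksumInTarget runs a command inside the target container and logs its stdout.
    func ChecksumInTarget(client clients.ClientSets) error {
        execPod := litmusexec.PodDetails{}
        // arguments: pod name, container name, namespace
        litmusexec.SetExecCommandAttributes(&execPod, "nginx-0", "nginx", "default")

        out, err := litmusexec.Exec(&execPod, client, []string{"/bin/sh", "-c", "md5sum /etc/hostname"})
        if err != nil {
            return err // a failed stream or non-empty stderr both surface here as structured errors
        }
        log.Infof("command output: %s", out)
        return nil
    }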