diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..c61d816
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,213 @@
+# Contributing
+
+These guidelines will help you get started with the Starboard Operator project.
+
+## Prerequisites
+
+1. Install Go
+
+   The project requires [Go 1.14][go-download] or later. We also assume that you're familiar with
+   Go's [GOPATH workspace][go-code] convention and have the appropriate environment variables set.
+2. Get the source code:
+
+   ```
+   $ git clone git@github.com:aquasecurity/starboard-operator.git
+   $ cd starboard-operator
+   ```
+3. Set up a dev Kubernetes cluster. We assume that you're using a single-node [KIND][kind] cluster created with the
+   following command:
+
+   ```
+   $ kind create cluster
+   ```
+
+## Deployment
+
+You'll deploy the operator in the `starboard-operator` Namespace and configure it to watch the `starboard-operator`
+Namespace. In OLM terms, this install mode is called `OwnNamespace` and is suitable for end users who want to install
+the operator in the same namespace as supervised workloads.
+
+> The `OwnNamespace` mode is a good starting point for a basic development workflow. For other install modes, see
+> [Operator Multitenancy with OperatorGroups][olm-operator-groups].
+
+### Prerequisites
+
+1. Build Docker images:
+
+   ```
+   $ export GOOS=linux
+   $ make docker-build
+   ```
+
+   This will build the `docker.io/aquasec/starboard-operator:dev` and `docker.io/aquasec/starboard-scanner-aqua:dev`
+   images. The second image is only used when you enable the Aqua CSP scanner. By default, Trivy is used as the
+   vulnerability scanner by pulling its official image from Docker Hub (`docker.io/aquasec/trivy:$TRIVY_VERSION`).
+2. Load Docker images into the cluster node:
+
+   ```
+   $ kind load docker-image aquasec/starboard-operator:dev
+   $ kind load docker-image aquasec/starboard-scanner-aqua:dev
+   ```
+3. Send the definition of the VulnerabilityReport custom resource to the Kubernetes API:
+
+   ```
+   $ kubectl apply -f https://raw.githubusercontent.com/aquasecurity/starboard/master/kube/crd/vulnerabilityreports-crd.yaml
+   ```
+4. Send the following Kubernetes object definitions to the Kubernetes API:
+
+   ```
+   $ kubectl apply -f deploy/kubectl/01-starboard-operator.ns.yaml \
+       -f deploy/kubectl/02-starboard-operator.sa.yaml \
+       -f deploy/kubectl/03-starboard-operator.role.yaml \
+       -f deploy/kubectl/04-starboard-operator.rolebinding.yaml
+   ```
+
+   This will create the `starboard-operator` Namespace and the `starboard-operator` ServiceAccount. Beyond that,
+   it will create the `starboard-operator` Role and bind it to the `starboard-operator` ServiceAccount in the
+   `starboard-operator` Namespace via the `starboard-operator` RoleBinding.
+
+### In cluster
+
+1. Create the `starboard-operator` Deployment in the `starboard-operator` namespace to run the operator's container:
+
+   ```
+   $ kubectl apply -f deploy/kubectl/05-starboard-operator.deployment.yaml
+   ```
+
+### Out of cluster
+
+1. Run the main function of the operator program:
+
+   ```
+   $ go run cmd/operator/main.go
+   ```
+
+### Enable Aqua CSP scanner
+
+1.
Create the `starboard-operator` secret in the `starboard-operator` namespace that holds the scanner's configuration:
+
+   ```
+   $ kubectl create secret generic starboard-operator \
+       --namespace starboard-operator \
+       --from-literal OPERATOR_SCANNER_AQUA_CSP_USERNAME=$AQUA_CONSOLE_USERNAME \
+       --from-literal OPERATOR_SCANNER_AQUA_CSP_PASSWORD=$AQUA_CONSOLE_PASSWORD \
+       --from-literal OPERATOR_SCANNER_AQUA_CSP_VERSION=$AQUA_VERSION \
+       --from-literal OPERATOR_SCANNER_AQUA_CSP_HOST=http://csp-console-svc.aqua:8080
+   ```
+
+## Operator Lifecycle Manager
+
+### Prerequisites
+
+1. Install [Operator Lifecycle Manager][olm] (OLM) and [Operator Marketplace][operator-marketplace]:
+
+   ```
+   $ ./deploy/olm/install.sh
+   ```
+
+2. Install [Operator Courier][operator-courier]:
+
+   ```
+   $ pip3 install operator-courier
+   ```
+3. [Sign up][quay] for a free Quay.io account if you're a new user.
+
+### Build OLM bundle
+
+1. Lint the OLM bundle:
+
+   ```
+   $ BUNDLE_SRC_DIR=deploy/olm/bundle
+   $ operator-courier verify $BUNDLE_SRC_DIR
+   ```
+2. Retrieve a Quay.io token:
+   ```
+   $ QUAY_USERNAME=
+   $ QUAY_PASSWORD=
+   $ QUAY_URL=https://quay.io/cnr/api/v1/users/login
+
+   $ QUAY_TOKEN=$(curl -s -H "Content-Type: application/json" -XPOST $QUAY_URL -d \
+     '{"user":{"username":"'"${QUAY_USERNAME}"'","password": "'"${QUAY_PASSWORD}"'"}}' |
+     jq -r .token)
+   ```
+3. Push the OLM bundle to Quay.io:
+   ```
+   $ QUAY_NAMESPACE=
+   $ PACKAGE_NAME=starboard-operator
+   $ PACKAGE_VERSION=
+
+   $ operator-courier push "$BUNDLE_SRC_DIR" "$QUAY_NAMESPACE" \
+       "$PACKAGE_NAME" "$PACKAGE_VERSION" "$QUAY_TOKEN"
+   ```
+
+### Create ClusterServiceVersion
+
+1. Create the OperatorSource resource:
+
+   ```
+   $ QUAY_FULL_NAME=
+   $ cat << EOF | kubectl apply -f -
+   apiVersion: operators.coreos.com/v1
+   kind: OperatorSource
+   metadata:
+     name: $QUAY_NAMESPACE-operators
+     namespace: marketplace
+   spec:
+     type: appregistry
+     endpoint: https://quay.io/cnr
+     displayName: "$QUAY_FULL_NAME Quay.io Applications"
+     publisher: "$QUAY_FULL_NAME"
+     registryNamespace: "$QUAY_NAMESPACE"
+   EOF
+   ```
+
+   An OperatorSource resource defines the external data store used to host operator bundles. In this case, you define
+   an OperatorSource that points to your Quay.io namespace, which gives OLM access to the OLM bundles hosted there.
+
+2. Create the OperatorGroup resource:
+
+   ```
+   $ cat << EOF | kubectl apply -f -
+   apiVersion: operators.coreos.com/v1alpha2
+   kind: OperatorGroup
+   metadata:
+     name: workloads
+     namespace: marketplace
+   spec:
+     targetNamespaces:
+       - marketplace
+   EOF
+   ```
+
+   You'll need an OperatorGroup to denote which namespaces the operator should watch. It must exist in the namespace
+   where you want to deploy the operator.
+
+3. Create the Subscription resource:
+
+   ```
+   $ cat << EOF | kubectl apply -f -
+   apiVersion: operators.coreos.com/v1alpha1
+   kind: Subscription
+   metadata:
+     name: starboard-operator
+     namespace: marketplace
+   spec:
+     channel: alpha
+     name: starboard-operator
+     source: $QUAY_NAMESPACE-operators
+     sourceNamespace: marketplace
+   EOF
+   ```
+
+   A Subscription links the previous steps together by selecting an operator and one of its channels. OLM uses this
+   information to start the corresponding operator Pod. The example above creates a new Subscription to the `alpha`
+   channel for the Starboard Operator.
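+
+   Once the Subscription is created, OLM should resolve it against the bundle you pushed earlier and roll out the
+   operator Deployment. A quick way to verify this is the following sketch; it assumes the `marketplace` namespace
+   and resource names used in the examples above, and the ClusterServiceVersion may take a minute to reach the
+   `Succeeded` phase:
+
+   ```
+   $ kubectl get operatorsource,operatorgroup,subscription -n marketplace
+   $ kubectl get clusterserviceversion -n marketplace
+   $ kubectl get deployment starboard-operator -n marketplace
+   ```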
+ +[go-download]: https://golang.org/dl/ +[go-code]: https://golang.org/doc/code.html +[kind]: https://github.com/kubernetes-sigs/kind +[olm]: https://github.com/operator-framework/operator-lifecycle-manager +[operator-marketplace]: https://github.com/operator-framework/operator-marketplace +[operator-courier]: https://github.com/operator-framework/operator-courier +[olm-operator-groups]: https://github.com/operator-framework/operator-lifecycle-manager/blob/master/doc/design/operatorgroups.md +[quay]: https://quay.io diff --git a/README.md b/README.md index 8d2acb1..61a47ba 100644 --- a/README.md +++ b/README.md @@ -9,40 +9,19 @@ This operator for Starboard automatically updates security report resources in r a Kubernetes cluster - for example, initiating a vulnerability scan when a new pod is started. Please see the main [Starboard][starboard] repo for more info about the Starboard project. -## Getting started +## Contributing -1. Run `make` to build operator binaries into Docker containers: - ``` - $ make docker-build - ``` -1. Define Custom Security Resources used by Starboard: - ``` - $ kubectl apply -f https://raw.githubusercontent.com/aquasecurity/starboard/master/kube/crd/vulnerabilityreports-crd.yaml - ``` -2. Create the `starboard-operator` Namespace: - ``` - $ kubectl create ns starboard-operator - ``` -3. Create a Secret that holds configuration of the Aqua CSP scanner: - ``` - $ kubectl create secret generic starboard-operator \ - --namespace starboard-operator \ - --from-literal OPERATOR_SCANNER_AQUA_CSP_USERNAME=$AQUA_CONSOLE_USERNAME \ - --from-literal OPERATOR_SCANNER_AQUA_CSP_PASSWORD=$AQUA_CONSOLE_PASSWORD \ - --from-literal OPERATOR_SCANNER_AQUA_CSP_VERSION=$AQUA_VERSION \ - --from-literal OPERATOR_SCANNER_AQUA_CSP_HOST=http://csp-console-svc.aqua:8080 - ``` -5. Create a Deployment for the Starboard Operator: - ``` - $ kubectl apply -f deploy/starboard-operator.yaml - ``` +Thanks for taking the time to join our community and start contributing! + +- See [CONTRIBUTING.md](CONTRIBUTING.md) for information about setting up your development environment and deploying the operator. +- Check out the [open issues](https://github.com/aquasecurity/starboard-operator/issues). ## Configuration | Name | Default | Description | |-----------------------------------------|----------------------|-------------| -| `OPERATOR_NAMESPACE` | `` | The namespace the operator is running in. | -| `OPERATOR_TARGET_NAMESPACE` | `` | The namespace the operator should be watching for changes. This can be a comma separated list of names to watch multiple namespaces (e.g. `ns1,ns2`). | +| `OPERATOR_NAMESPACE` | N/A | The namespace the operator is running in. | +| `OPERATOR_TARGET_NAMESPACES` | N/A | The namespace the operator should be watching for changes. This can be a comma separated list of names to watch multiple namespaces (e.g. `ns1,ns2`). | | `OPERATOR_SCAN_JOB_TIMEOUT` | `5m` | The length of time to wait before giving up on a scan job | | `OPERATOR_SCANNER_TRIVY_ENABLED` | `true` | The flag to enable Trivy vulnerability scanner | | `OPERATOR_SCANNER_TRIVY_VERSION` | `0.11.0` | The version of Trivy to be used | diff --git a/cmd/operator/main.go b/cmd/operator/main.go index 6491f5b..bd6f79c 100644 --- a/cmd/operator/main.go +++ b/cmd/operator/main.go @@ -99,17 +99,18 @@ func run() error { Scheme: scheme, } - if len(targetNamespaces) == 1 { - // Add support for OwnNamespace and SingleNamespace set in STARBOARD_TARGET_NAMESPACE (e.g. ns1). 
+	if len(targetNamespaces) == 1 && targetNamespaces[0] == operatorNamespace {
+		// Add support for OwnNamespace set in OPERATOR_TARGET_NAMESPACES (e.g. ns1).
 		setupLog.Info("Constructing single-namespaced cache", "namespace", targetNamespaces[0])
 		options.Namespace = targetNamespaces[0]
 	} else {
-		// Add support for MultiNamespace set in STARBOARD_TARGET_NAMESPACE (e.g. ns1,ns2).
+		// Add support for SingleNamespace and MultiNamespace set in OPERATOR_TARGET_NAMESPACES (e.g. ns1,ns2).
 		// Note that we may face performance issues when using this with a high number of namespaces.
 		// More: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/cache#MultiNamespacedCacheBuilder
-		setupLog.Info("Constructing multi-namespaced cache", "namespaces", targetNamespaces)
+		cachedNamespaces := append(targetNamespaces, operatorNamespace)
+		setupLog.Info("Constructing multi-namespaced cache", "namespaces", cachedNamespaces)
 		options.Namespace = ""
-		options.NewCache = cache.MultiNamespacedCacheBuilder(targetNamespaces)
+		options.NewCache = cache.MultiNamespacedCacheBuilder(cachedNamespaces)
 	}
 
 	kubernetesConfig, err := ctrl.GetConfig()
@@ -141,7 +142,7 @@ func run() error {
 		Client:  mgr.GetClient(),
 		Store:   store,
 		Scanner: scanner,
-		Log:     ctrl.Log.WithName("controller").WithName("pods"),
+		Log:     ctrl.Log.WithName("controller").WithName("Pod"),
 		Scheme:  mgr.GetScheme(),
 	}).SetupWithManager(mgr); err != nil {
 		return fmt.Errorf("unable to create pod controller: %w", err)
@@ -153,7 +154,7 @@ func run() error {
 		Client:  mgr.GetClient(),
 		Store:   store,
 		Scanner: scanner,
-		Log:     ctrl.Log.WithName("controller").WithName("scan-jobs"),
+		Log:     ctrl.Log.WithName("controller").WithName("Job"),
 		Scheme:  mgr.GetScheme(),
 	}).SetupWithManager(mgr); err != nil {
 		return fmt.Errorf("unable to create job controller: %w", err)
diff --git a/deploy/examples/aqua-scan-job.yaml b/deploy/examples/aqua-scan-job.yaml
index 00fd00c..69ca5e1 100644
--- a/deploy/examples/aqua-scan-job.yaml
+++ b/deploy/examples/aqua-scan-job.yaml
@@ -30,7 +30,7 @@ spec:
           mountPath: /downloads
       containers:
         - name: scanner
-          image: docker.io/aquasec/starboard-scanner-aqua:0.0.1-alpha.2
+          image: docker.io/aquasec/starboard-scanner-aqua:0.0.1-alpha.4
           imagePullPolicy: IfNotPresent
           command:
             - "/bin/sh"
diff --git a/deploy/kubectl/01-starboard-operator.ns.yaml b/deploy/kubectl/01-starboard-operator.ns.yaml
new file mode 100644
index 0000000..de348f7
--- /dev/null
+++ b/deploy/kubectl/01-starboard-operator.ns.yaml
@@ -0,0 +1,5 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: starboard-operator
diff --git a/deploy/kubectl/02-starboard-operator.sa.yaml b/deploy/kubectl/02-starboard-operator.sa.yaml
new file mode 100644
index 0000000..7a2786d
--- /dev/null
+++ b/deploy/kubectl/02-starboard-operator.sa.yaml
@@ -0,0 +1,6 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: starboard-operator
+  namespace: starboard-operator
diff --git a/deploy/kubectl/03-starboard-operator.role.yaml b/deploy/kubectl/03-starboard-operator.role.yaml
new file mode 100644
index 0000000..45269d5
--- /dev/null
+++ b/deploy/kubectl/03-starboard-operator.role.yaml
@@ -0,0 +1,43 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: starboard-operator
+  namespace: starboard-operator
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - "pods"
+      - "pods/log"
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - apps
+    resources:
+      - replicasets
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - batch
+    resources:
+      - jobs
+    verbs:
+      - get
+      - list
+ - watch + - create + - delete + - apiGroups: + - aquasecurity.github.io + resources: + - vulnerabilityreports + verbs: + - get + - list + - watch + - create diff --git a/deploy/kubectl/04-starboard-operator.rolebinding.yaml b/deploy/kubectl/04-starboard-operator.rolebinding.yaml new file mode 100644 index 0000000..3040365 --- /dev/null +++ b/deploy/kubectl/04-starboard-operator.rolebinding.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: starboard-operator + namespace: starboard-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: starboard-operator +subjects: + - kind: ServiceAccount + name: starboard-operator + namespace: starboard-operator diff --git a/deploy/starboard-operator.yaml b/deploy/kubectl/05-starboard-operator.deployment.yaml similarity index 61% rename from deploy/starboard-operator.yaml rename to deploy/kubectl/05-starboard-operator.deployment.yaml index f9f1fbc..23c09f1 100644 --- a/deploy/starboard-operator.yaml +++ b/deploy/kubectl/05-starboard-operator.deployment.yaml @@ -1,64 +1,4 @@ --- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: starboard-operator - namespace: starboard-operator ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: starboard-operator -rules: - - apiGroups: - - "" - resources: - - "pods" - - "pods/log" - verbs: - - get - - list - - watch - - apiGroups: - - apps - resources: - - replicasets - verbs: - - get - - list - - apiGroups: - - batch - resources: - - jobs - verbs: - - get - - list - - watch - - create - - delete - - apiGroups: - - aquasecurity.github.io - resources: - - vulnerabilityreports - verbs: - - get - - list - - watch - - create ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: starboard-operator -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: starboard-operator -subjects: - - kind: ServiceAccount - name: starboard-operator - namespace: starboard-operator ---- apiVersion: apps/v1 kind: Deployment metadata: @@ -83,12 +23,16 @@ spec: fsGroup: 10000 containers: - name: operator - image: docker.io/aquasec/starboard-operator:0.0.1-alpha.2 + image: docker.io/aquasec/starboard-operator:dev imagePullPolicy: IfNotPresent securityContext: privileged: false readOnlyRootFilesystem: true env: + - name: OPERATOR_NAMESPACE + value: "starboard-operator" + - name: OPERATOR_TARGET_NAMESPACES + value: "starboard-operator" - name: OPERATOR_SCANNER_TRIVY_ENABLED value: "true" - name: OPERATOR_SCANNER_TRIVY_VERSION @@ -100,18 +44,22 @@ spec: secretKeyRef: name: starboard-operator key: OPERATOR_SCANNER_AQUA_CSP_VERSION + optional: true - name: OPERATOR_SCANNER_AQUA_CSP_HOST valueFrom: secretKeyRef: name: starboard-operator key: OPERATOR_SCANNER_AQUA_CSP_HOST + optional: true - name: OPERATOR_SCANNER_AQUA_CSP_USER valueFrom: secretKeyRef: name: starboard-operator key: OPERATOR_SCANNER_AQUA_CSP_USERNAME + optional: true - name: OPERATOR_SCANNER_AQUA_CSP_PASSWORD valueFrom: secretKeyRef: name: starboard-operator key: OPERATOR_SCANNER_AQUA_CSP_PASSWORD + optional: true diff --git a/deploy/olm/bundle/0.0.1-alpha.4/starboard-operator.v0.0.1-alpha.4.clusterserviceversion.yaml b/deploy/olm/bundle/0.0.1-alpha.4/starboard-operator.v0.0.1-alpha.4.clusterserviceversion.yaml new file mode 100644 index 0000000..2dc6e54 --- /dev/null +++ b/deploy/olm/bundle/0.0.1-alpha.4/starboard-operator.v0.0.1-alpha.4.clusterserviceversion.yaml @@ -0,0 +1,147 @@ +apiVersion: 
operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + name: starboard-operator.v0.0.1-alpha.4 + namespace: placeholder + annotations: + capabilities: Basic Install + categories: Security + description: Keeps Starboard resources updated + certified: "false" + containerImage: aquasec/starboard-operator:0.0.1-alpha.4 + createdAt: 2020-09-15T08:00:00Z + support: Aqua Security, Inc. + repository: https://github.com/aquasecurity/starboard-operator + alm-examples: |- + [] +spec: + displayName: Starboard Operator + version: 0.0.1-alpha.4 + description: |- + This operator for Starboard automatically updates security report resources in response to workload and other changes on + a Kubernetes cluster - for example, initiating a vulnerability scan when a new pod is started. Please see the main + [Starboard](https://github.io/aquasecurity/starboard) repo for more info about the Starboard project. + keywords: ['aqua-security', 'scanning', 'security'] + maintainers: + - name: Daniel Pacak + email: daniel.pacak@aquasec.com + provider: + name: Aqua Security, Inc. + maturity: alpha + labels: + name: starboard-operator + selector: + matchLabels: + name: starboard-operator + links: + - name: Starboard Operator on GitHub + url: https://github.com/aquasecurity/starboard-operator + - name: Starboard on GitHub + url: https://github.com/aquasecurity/starboard + - name: Introduction to Starboard by Liz Rice + url: https://blog.aquasec.com/starboard-kubernetes-tools + icon: + - base64data: PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iaXNvLTg4NTktMSI/Pg0KPCEtLSBHZW5lcmF0b3I6IEFkb2JlIElsbHVzdHJhdG9yIDI0LjIuMSwgU1ZHIEV4cG9ydCBQbHVnLUluIC4gU1ZHIFZlcnNpb246IDYuMDAgQnVpbGQgMCkgIC0tPg0KPHN2ZyB2ZXJzaW9uPSIxLjEiIGlkPSJzdGFyYm9hcmQiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiIHg9IjBweCIgeT0iMHB4Ig0KCSB3aWR0aD0iNTAwcHgiIGhlaWdodD0iNTAwcHgiIHZpZXdCb3g9IjAgMCA1MDAgNTAwIiBlbmFibGUtYmFja2dyb3VuZD0ibmV3IDAgMCA1MDAgNTAwIiB4bWw6c3BhY2U9InByZXNlcnZlIj4NCjxnPg0KCTxwYXRoIGZpbGw9IiMwRDgxOUIiIGQ9Ik00NDAuMjA0LDI2MS43ODNsLTIwLjA1OSwyNC44MWwwLjk0Niw4Ljk3N2MwLjM5NSwxLjcxNiwwLjQ0MSwzLjU5NywwLjE0LDUuNDE0bC0wLjE2NywwLjg2OA0KCQljLTAuMzczLDEuNjQzLTEuMDMyLDMuMjE5LTEuOSw0LjU0NmMtMC4yOCwwLjQyNC0wLjU3NiwwLjgzOS0wLjgxOSwxLjE0NWwtOTcuNjcsMTIxLjQzNmMtMS40ODgsMS44NDYtMy41MTYsMy4zMy01Ljg2Nyw0LjI4OA0KCQljLTEuNzY4LDAuNzIxLTMuNjUzLDEuMTAxLTUuNDUxLDEuMTAxbC0xNTYuNjQ4LTAuMDM3Yy0xLjg4MSwwLTMuODUyLTAuNDE5LTUuNzAzLTEuMjExYy0yLjI0NC0wLjk2MS00LjE4NS0yLjQwMy01LjU5NS00LjE1NA0KCQlMNDMuNzc0LDMwNy41MTFjLTAuODMxLTEuMDM1LTEuNTItMi4yMzEtMi4wNDctMy41NTNjLTAuMjQ1LTAuNjItMC40NDktMS4yNTgtMC42MTEtMS45MWMtMC41NDQtMi4yMDItMC41ODEtNC40NTMtMC4xMS02LjUwNw0KCQlsMzQuODY5LTE1MS40NTNjMC40ODgtMi4xMjMsMS40ODgtNC4xNDQsMi44OTEtNS44NDNjMS4zMjktMS42MDgsMi45NS0yLjg3OSw0LjgxMy0zLjc2OWwxNDEuMjA5LTY3LjQyMw0KCQljMS45MzctMC45MjcsNC4xMDktMS40MTcsNi4yODctMS40MjJsMC4xMSwwLjAwNWwwLjczMSwwLjAyNWMxLjg4MywwLjEwMywzLjc3NiwwLjU4Niw1LjQ3LDEuMzk1bDE0MS4xMDYsNjcuNDINCgkJYzEuNzcsMC44NDYsMy4zMjcsMi4wMyw0LjYyNCwzLjUxOWMxLjU1NywxLjc4NSwyLjY1NSwzLjkzMywzLjE4LDYuMjE2bDEyLjkwOSw1Ni40ODJsMjQuNzU0LTQuMzg3bC0xMy4xOTMtNTcuNzE5DQoJCWMtMS40NDktNi4yOTctNC40NjUtMTIuMjA2LTguNzI5LTE3LjA5NWMtMy41Ni00LjA4Mi03LjgzNi03LjMzNC0xMi43MTYtOS42NjhMMjQ4LjIwOCw0NC40Yy00LjU2My0yLjE4LTkuNDMzLTMuNDUtMTQuNDc5LTMuNzgxDQoJCWwtMC42NTItMC4wNDdjLTAuNTc0LTAuMDI3LTEuMTU1LTAuMDMyLTEuMjQ4LTAuMDMybC0wLjc5OS0wLjAxN2MtNS44OSwwLjAxLTExLjc5NCwxLjM1My0xNy4wNjgsMy44NzZsLTE0MS4yMDYsNjcuNDINCgkJYy01LjE4MywyLjQ3OS05LjY3NSw1Ljk5LTEzLjM0NiwxMC40MzNjLTMuODc2LDQuNjkxLTYuNjQ1LDEwLjI5My04LjAwMywx
Ni4yMDJMMTYuNTM4LDI4OS45MDkNCgkJYy0xLjM1MSw1Ljg4LTEuMjgsMTIuMTY0LDAuMjA0LDE4LjE2NGMwLjQzNCwxLjc1OCwwLjk5MywzLjQ5NiwxLjY1Nyw1LjE3MWMxLjQ1NCwzLjY0OCwzLjQwNiw3LjAxLDUuODA2LDkuOTk5bDk3LjY0OCwxMjEuNDczDQoJCWMzLjk5Miw0Ljk0Niw5LjI3MSw4LjkxOCwxNS4yNjgsMTEuNDg1YzQuOTQ2LDIuMTE4LDEwLjMzNSwzLjIzOSwxNS42MDIsMy4yMzlsMTU2LjYzOCwwLjAzN2M1LjA4NSwwLDEwLjEwNy0wLjk5NSwxNC45MjItMi45NTcNCgkJYzYuMjgyLTIuNTYsMTEuNzk2LTYuNjM3LDE1Ljk1Mi0xMS43OTlsOTcuNzQ2LTEyMS41MzJjMC43Ny0wLjk3NiwxLjQ5OC0xLjk4NCwyLjE4Ny0zLjAzOGMyLjUyOC0zLjg2Miw0LjMzNy04LjE0OCw1LjM3Ny0xMi43MzMNCgkJYzAuMTcyLTAuNzUsMC4zMTYtMS41MSwwLjQ0OS0yLjI5N2MwLjg0OC01LjExNywwLjcwNC0xMC4yMi0wLjQzNC0xNS4xNzJMNDQwLjIwNCwyNjEuNzgzeiIvPg0KCTxnPg0KCQk8cG9seWdvbiBmaWxsPSIjMEQ4MTlCIiBwb2ludHM9IjU2LjQ5MSwyOTEuNTk1IDE2Mi4zNzMsMjk1Ljc2NiA0ODQuNTQ2LDE5My4wNzUgMjIwLjI3NCwyNDMuNTE0IAkJIi8+DQoJCTxwb2x5Z29uIGZpbGw9IiM5OUQ1RUIiIHBvaW50cz0iMjAyLjI5NSwxMDAuOTk4IDE5OC4wOTEsMzMyLjkxIDMxNS4yMjEsMjcxLjI0MSAJCSIvPg0KCQk8cG9seWdvbiBmaWxsPSIjMDJCMEQ0IiBwb2ludHM9IjIwMi4yNjUsMTAxLjA1OCAxOTguMzk1LDMzNi41MjggMTM2Ljc4MSwyOTIuNCAJCSIvPg0KCQk8cG9seWdvbiBmaWxsPSIjRjc5NDIwIiBwb2ludHM9IjU2LjQ4NywyOTEuNTkyIDE2MC45NzcsMjg5LjM5MyAxMjIuMDM0LDM3Mi40MDggCQkiLz4NCgkJPHBvbHlnb24gZmlsbD0iI0YwREUzNCIgcG9pbnRzPSI0ODQuODEzLDE5Mi44NzEgMTYwLjQ4MSwyODkuNCAzMDYuMTc1LDQxMy44MTggCQkiLz4NCgkJPHBvbHlnb24gZmlsbD0iI0ZBQUY0MCIgcG9pbnRzPSIxNjAuNDczLDI4OS40MDQgMzA2LjE3NSw0MTMuODYyIDEyMS45OCwzNzIuMzY0IAkJIi8+DQoJPC9nPg0KPC9nPg0KPC9zdmc+DQo= + mediatype: image/svg+xml + miniKubeVersion: 1.11.0 + ## installModes When creating OperatorGroups it is important to keep in mind that an + ## operator may not support all namespace configuration. For example, an operator that + ## is designed to run at the cluster level shouldn't be expected to work in an + ## OperatorGroup that defines a single targetNamespace. + ## + ## There are four InstallModes that an operator can support: + ## + ## - OwnNamespace: If supported, the operator can be configured to watch events in the + ## namespace it is deployed in. + ## - SingleNamespace: If supported, the operator can be configured to watch for events + ## in a single namespace that the operator is not deployed in. + ## - MultiNamespace: If supported, the operator can be configured to watch for events + ## in more than one namespace. + ## - AllNamespaces: If supported, the operator can be configured to watch for events + ## in all namespaces. 
+ installModes: + - type: OwnNamespace + supported: true + - type: SingleNamespace + supported: true + - type: MultiNamespace + supported: true + - type: AllNamespaces + supported: false + install: + strategy: deployment + spec: + permissions: + - serviceAccountName: starboard-operator + rules: + - apiGroups: + - "" + resources: + - "pods" + - "pods/log" + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - replicasets + verbs: + - get + - list + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - create + - delete + - apiGroups: + - aquasecurity.github.io + resources: + - vulnerabilityreports + verbs: + - get + - list + - watch + - create + deployments: + - name: starboard-operator + spec: + replicas: 1 + selector: + matchLabels: + name: starboard-operator + template: + metadata: + labels: + name: starboard-operator + spec: + serviceAccountName: starboard-operator + containers: + - name: starboard-operator + image: aquasec/starboard-operator:0.0.1-alpha.4 + imagePullPolicy: IfNotPresent + env: + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.annotations['olm.operatorNamespace'] + - name: OPERATOR_TARGET_NAMESPACES + valueFrom: + fieldRef: + fieldPath: metadata.annotations['olm.targetNamespaces'] + ports: + - containerPort: 8080 + name: metrics + customresourcedefinitions: + owned: + - kind: VulnerabilityReport + name: vulnerabilityreports.aquasecurity.github.io + version: v1alpha1 + displayName: VulnerabilityReport + description: Represents the result of scanning a container image for known security vulnerabilities. diff --git a/deploy/olm/bundle/0.0.1-alpha.4/vulnerabilityreports.v1alpha1.aquasecurity.github.io.yaml b/deploy/olm/bundle/0.0.1-alpha.4/vulnerabilityreports.v1alpha1.aquasecurity.github.io.yaml new file mode 100644 index 0000000..36f8c39 --- /dev/null +++ b/deploy/olm/bundle/0.0.1-alpha.4/vulnerabilityreports.v1alpha1.aquasecurity.github.io.yaml @@ -0,0 +1,175 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: vulnerabilityreports.aquasecurity.github.io +spec: + group: aquasecurity.github.io + versions: + - name: v1alpha1 + served: true + storage: true + scope: Namespaced + names: + singular: vulnerabilityreport + plural: vulnerabilityreports + kind: VulnerabilityReport + listKind: VulnerabilityReportList + categories: + - all + shortNames: + - vuln + - vulns + validation: + openAPIV3Schema: + type: object + required: + - apiVersion + - kind + - metadata + - report + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + report: + type: object + required: + - scanner + - artifact + - summary + - vulnerabilities + properties: + scanner: + type: object + required: + - name + - vendor + - version + properties: + name: + type: string + vendor: + type: string + version: + type: string + registry: + type: object + properties: + server: + type: string + artifact: + type: object + properties: + repository: + type: string + digest: + type: string + tag: + type: string + mimeType: + type: string + summary: + type: object + required: + - criticalCount + - highCount + - mediumCount + - lowCount + - unknownCount + properties: + criticalCount: + type: integer + minimum: 0 + highCount: + type: integer + minimum: 0 + mediumCount: + type: integer + minimum: 0 + lowCount: + type: integer + minimum: 0 + unknownCount: + type: integer + minimum: 0 + vulnerabilities: + type: array + items: + type: object + required: + - 
vulnerabilityID
+                    - resource
+                    - installedVersion
+                    - fixedVersion
+                    - severity
+                    - title
+                  properties:
+                    vulnerabilityID:
+                      type: string
+                    resource:
+                      type: string
+                    installedVersion:
+                      type: string
+                    fixedVersion:
+                      type: string
+                    severity:
+                      type: string
+                      enum:
+                        - CRITICAL
+                        - HIGH
+                        - MEDIUM
+                        - LOW
+                        - UNKNOWN
+                    title:
+                      type: string
+                    description:
+                      type: string
+                    links:
+                      type: array
+                      items:
+                        type: string
+  additionalPrinterColumns:
+    - JSONPath: .report.artifact.repository
+      type: string
+      name: Repository
+      description: The name of the image repository
+    - JSONPath: .report.artifact.tag
+      type: string
+      name: Tag
+      description: The name of the image tag
+    - JSONPath: .report.scanner.name
+      type: string
+      name: Scanner
+      description: The name of the vulnerability scanner
+    - JSONPath: .metadata.creationTimestamp
+      type: date
+      name: Age
+      description: The age of the VulnerabilityReport
+    - JSONPath: .report.summary.criticalCount
+      type: integer
+      name: Critical
+      description: The number of critical vulnerabilities
+      priority: 1
+    - JSONPath: .report.summary.highCount
+      type: integer
+      name: High
+      description: The number of high vulnerabilities
+      priority: 1
+    - JSONPath: .report.summary.mediumCount
+      type: integer
+      name: Medium
+      description: The number of medium vulnerabilities
+      priority: 1
+    - JSONPath: .report.summary.lowCount
+      type: integer
+      name: Low
+      description: The number of low vulnerabilities
+      priority: 1
+    - JSONPath: .report.summary.unknownCount
+      type: integer
+      name: Unknown
+      description: The number of unknown vulnerabilities
+      priority: 1
diff --git a/deploy/olm/bundle/starboard-operator.package.yaml b/deploy/olm/bundle/starboard-operator.package.yaml
new file mode 100644
index 0000000..39408d7
--- /dev/null
+++ b/deploy/olm/bundle/starboard-operator.package.yaml
@@ -0,0 +1,5 @@
+packageName: starboard-operator
+channels:
+  - name: alpha
+    currentCSV: starboard-operator.v0.0.1-alpha.4
+defaultChannel: alpha
diff --git a/deploy/olm/install.sh b/deploy/olm/install.sh
new file mode 100755
index 0000000..cc4e619
--- /dev/null
+++ b/deploy/olm/install.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+
+OLM_VERSION=0.15.1
+MARKETPLACE_VERSION=4.5
+OPERATOR_MARKETPLACE_VERSION="release-${MARKETPLACE_VERSION}"
+
+curl -sL https://github.com/operator-framework/operator-lifecycle-manager/releases/download/${OLM_VERSION}/install.sh | bash -s ${OLM_VERSION}
+kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-marketplace/${OPERATOR_MARKETPLACE_VERSION}/deploy/upstream/01_namespace.yaml
+kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-marketplace/${OPERATOR_MARKETPLACE_VERSION}/deploy/upstream/03_operatorsource.crd.yaml
+kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-marketplace/${OPERATOR_MARKETPLACE_VERSION}/deploy/upstream/04_service_account.yaml
+kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-marketplace/${OPERATOR_MARKETPLACE_VERSION}/deploy/upstream/05_role.yaml
+kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-marketplace/${OPERATOR_MARKETPLACE_VERSION}/deploy/upstream/06_role_binding.yaml
+sleep 1
+kubectl apply -f https://raw.githubusercontent.com/operator-framework/operator-marketplace/${OPERATOR_MARKETPLACE_VERSION}/deploy/upstream/07_upstream_operatorsource.cr.yaml
+curl -sL https://raw.githubusercontent.com/operator-framework/operator-marketplace/${OPERATOR_MARKETPLACE_VERSION}/deploy/upstream/08_operator.yaml | \
+sed -e
"s;quay.io/openshift/origin-operator-marketplace:latest;quay.io/openshift/origin-operator-marketplace:${MARKETPLACE_VERSION};" | \ +kubectl apply -f - diff --git a/pkg/aqua/scanner.go b/pkg/aqua/scanner.go index c997814..efa939f 100644 --- a/pkg/aqua/scanner.go +++ b/pkg/aqua/scanner.go @@ -58,9 +58,10 @@ func (s *aquaScanner) NewScanJob(resource kube.Object, spec corev1.PodSpec, opti Name: jobName, Namespace: options.Namespace, Labels: labels.Set{ - kube.LabelResourceKind: string(resource.Kind), - kube.LabelResourceName: resource.Name, - kube.LabelResourceNamespace: resource.Namespace, + kube.LabelResourceKind: string(resource.Kind), + kube.LabelResourceName: resource.Name, + kube.LabelResourceNamespace: resource.Namespace, + "app.kubernetes.io/managed-by": "starboard-operator", }, Annotations: map[string]string{ kube.AnnotationContainerImages: containerImagesAsJSON, @@ -73,9 +74,10 @@ func (s *aquaScanner) NewScanJob(resource kube.Object, spec corev1.PodSpec, opti Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: labels.Set{ - kube.LabelResourceKind: string(resource.Kind), - kube.LabelResourceName: resource.Name, - kube.LabelResourceNamespace: resource.Namespace, + kube.LabelResourceKind: string(resource.Kind), + kube.LabelResourceName: resource.Name, + kube.LabelResourceNamespace: resource.Namespace, + "app.kubernetes.io/managed-by": "starboard-operator", }, }, Spec: corev1.PodSpec{ diff --git a/pkg/controllers/job_controller.go b/pkg/controllers/job_controller.go index 9f1756f..10733bc 100644 --- a/pkg/controllers/job_controller.go +++ b/pkg/controllers/job_controller.go @@ -5,6 +5,8 @@ import ( "fmt" "reflect" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/aquasecurity/starboard-operator/pkg/etc" "github.com/aquasecurity/starboard-operator/pkg/logs" "github.com/aquasecurity/starboard-operator/pkg/scanner" @@ -36,31 +38,35 @@ type JobReconciler struct { func (r *JobReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { ctx := context.Background() - log := r.Log.WithValues("job", req.NamespacedName) + log := r.Log.WithValues("job", fmt.Sprintf("%s/%s", req.Namespace, req.Name)) + if req.Namespace != r.Config.Namespace { + log.Info("Ignoring Job not managed by this operator") return ctrl.Result{}, nil } - j := &batchv1.Job{} - err := r.Client.Get(ctx, req.NamespacedName, j) + job := &batchv1.Job{} + err := r.Client.Get(ctx, req.NamespacedName, job) if err != nil && errors.IsNotFound(err) { + log.Info("Ignoring Job that must have been deleted") return ctrl.Result{}, nil } else if err != nil { - return ctrl.Result{}, err + return ctrl.Result{}, fmt.Errorf("getting job from cache: %w", err) } - if len(j.Status.Conditions) == 0 { + if len(job.Status.Conditions) == 0 { + log.Info("Ignoring Job with unknown status condition") return ctrl.Result{}, nil } - switch jobCondition := j.Status.Conditions[0].Type; jobCondition { + switch jobCondition := job.Status.Conditions[0].Type; jobCondition { case batchv1.JobComplete: - err := r.processCompleteScanJob(ctx, j) + err := r.processCompleteScanJob(ctx, job) if err != nil { return ctrl.Result{}, err } case batchv1.JobFailed: - err := r.processFailedScanJob(ctx, j) + err := r.processFailedScanJob(ctx, job) if err != nil { return ctrl.Result{}, err } @@ -73,8 +79,7 @@ func (r *JobReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { } func (r *JobReconciler) processCompleteScanJob(ctx context.Context, scanJob *batchv1.Job) error { - log := r.Log.WithValues("job.name", scanJob.Name, "job.namespace", 
scanJob.Namespace) - log.Info("Started processing complete scan job") + log := r.Log.WithValues("job", fmt.Sprintf("%s/%s", scanJob.Namespace, scanJob.Name)) workload, err := kube.ObjectFromLabelsSet(scanJob.Labels) if err != nil { return fmt.Errorf("getting workload from scan job labels set: %w", err) @@ -121,9 +126,8 @@ func (r *JobReconciler) processCompleteScanJob(ctx context.Context, scanJob *bat if err != nil { return fmt.Errorf("writing vulnerability reports: %w", err) } - log.Info("Finished processing complete scan job") log.Info("Deleting complete scan job") - return r.Client.Delete(ctx, scanJob) + return r.Client.Delete(ctx, scanJob, client.PropagationPolicy(metav1.DeletePropagationBackground)) } func (r *JobReconciler) GetPodControlledBy(ctx context.Context, job *batchv1.Job) (*corev1.Pod, error) { diff --git a/pkg/controllers/pod_controller.go b/pkg/controllers/pod_controller.go index 9cad378..733261f 100644 --- a/pkg/controllers/pod_controller.go +++ b/pkg/controllers/pod_controller.go @@ -2,6 +2,7 @@ package controllers import ( "context" + "fmt" "reflect" "github.com/aquasecurity/starboard-operator/pkg/etc" @@ -45,45 +46,55 @@ type PodReconciler struct { func (r *PodReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { ctx := context.Background() - if r.Config.Namespace == req.Namespace { - return ctrl.Result{}, nil - } - pod := &corev1.Pod{} + log := r.Log.WithValues("pod", fmt.Sprintf("%s/%s", req.Namespace, req.Name)) + + // Retrieve the Pod from cache. err := r.Client.Get(ctx, req.NamespacedName, pod) if err != nil && errors.IsNotFound(err) { + log.Info("Ignoring Pod that must have been deleted") return ctrl.Result{}, nil } else if err != nil { - return ctrl.Result{}, err + return ctrl.Result{}, fmt.Errorf("getting pod from cache: %w", err) + } + + // Check if the Pod is managed by the operator, i.e. is controlled by a scan Job created by the PodReconciler. + if IsPodManagedByStarboardOperator(pod) { + log.Info("Ignoring Pod managed by this operator") + return ctrl.Result{}, nil } - // Check if the Pod is being terminated + // Check if the Pod is being terminated. if pod.DeletionTimestamp != nil { + log.Info("Ignoring Pod that is being terminated") return ctrl.Result{}, nil } - // Check if the Pod has been scheduled to a Node and all its containers are ready - if !AllContainersHaveReadyCondition(pod) { + // Check if the Pod containers are ready. + if !HasContainersReadyCondition(pod) { + log.Info("Ignoring Pod that is being scheduled") return ctrl.Result{}, nil } owner := GetImmediateOwnerReference(pod) + log.Info("Resolving Pod owner", "owner", owner) - // Check if the Pod's containers have corresponding vulnerability reports + // Check if containers of the Pod have corresponding VulnerabilityReports. hasDesiredState, err := r.hasVulnerabilityReports(ctx, owner, pod) if err != nil { - return ctrl.Result{}, err + return ctrl.Result{}, fmt.Errorf("getting vulnerability reports: %w", err) } if hasDesiredState { + log.Info("Ignoring Pod that already has VulnerabilityReports") return ctrl.Result{}, nil } - // Create a scan Job to find vulnerabilities in the Pod container images + // Create a scan Job to create VulnerabilityReports for the Pod containers images. err = r.ensureScanJob(ctx, owner, pod) if err != nil { - return ctrl.Result{}, err + return ctrl.Result{}, fmt.Errorf("ensuring scan job: %w", err) } return ctrl.Result{}, nil @@ -110,10 +121,9 @@ func (r *PodReconciler) hasVulnerabilityReports(ctx context.Context, owner kube. 
 }
 
 func (r *PodReconciler) ensureScanJob(ctx context.Context, owner kube.Object, p *corev1.Pod) error {
-	log := r.Log.WithValues("owner.kind", owner.Kind,
-		"owner.name", owner.Name,
-		"owner.namespace", owner.Namespace,
-		"pod.name", p.Name)
+	log := r.Log.WithValues("pod", fmt.Sprintf("%s/%s", p.Namespace, p.Name))
+
+	log.Info("Ensuring scan Job")
 
 	jobList := &batchv1.JobList{}
 	err := r.Client.List(ctx, jobList, client.MatchingLabels{
@@ -122,11 +132,12 @@ func (r *PodReconciler) ensureScanJob(ctx context.Context, owner kube.Object, p
 		kube.LabelResourceName: owner.Name,
 	}, client.InNamespace(r.Config.Namespace))
 	if err != nil {
-		return err
+		return fmt.Errorf("listing jobs: %w", err)
 	}
 
 	if len(jobList.Items) > 0 {
-		log.Info("Scan job already exists")
+		log.Info("Scan job already exists",
+			"job", fmt.Sprintf("%s/%s", jobList.Items[0].Namespace, jobList.Items[0].Name))
 		return nil
 	}
 
@@ -146,7 +157,8 @@ func (r *PodReconciler) ensureScanJob(ctx context.Context, owner kube.Object, p
 			return err
 		}
 	}
-	log.Info("Creating scan job", "job.name", scanJob.Name, "job.namespace", scanJob.Namespace)
+	log.Info("Creating scan job",
+		"job", fmt.Sprintf("%s/%s", scanJob.Namespace, scanJob.Name))
 	return r.Client.Create(ctx, scanJob)
 }
 
@@ -156,13 +168,23 @@ func (r *PodReconciler) SetupWithManager(mgr ctrl.Manager) error {
 		Complete(r)
 }
 
-func AllContainersHaveReadyCondition(pod *corev1.Pod) bool {
+// IsPodManagedByStarboardOperator returns true if the specified Pod
+// is managed by the Starboard Operator, false otherwise.
+//
+// We define managed Pods as ones controlled by Jobs created by the Starboard Operator.
+// They're labeled with `app.kubernetes.io/managed-by=starboard-operator`.
+func IsPodManagedByStarboardOperator(pod *corev1.Pod) bool {
+	managedBy, exists := pod.Labels["app.kubernetes.io/managed-by"]
+	return exists && managedBy == "starboard-operator"
+}
+
+func HasContainersReadyCondition(pod *corev1.Pod) bool {
 	for _, condition := range pod.Status.Conditions {
-		if condition.Type != corev1.ContainersReady {
-			return false
+		if condition.Type == corev1.ContainersReady {
+			return true
 		}
 	}
-	return true
+	return false
 }
 
 func GetImmediateOwnerReference(pod *corev1.Pod) kube.Object {
diff --git a/pkg/etc/config.go b/pkg/etc/config.go
index 02f961c..2d4492b 100644
--- a/pkg/etc/config.go
+++ b/pkg/etc/config.go
@@ -22,10 +22,10 @@ type Config struct {
 }
 
 type Operator struct {
-	Namespace       string        `env:"OPERATOR_NAMESPACE"`
-	TargetNamespace string        `env:"OPERATOR_TARGET_NAMESPACE"`
-	ServiceAccount  string        `env:"OPERATOR_SERVICE_ACCOUNT" envDefault:"starboard-operator"`
-	ScanJobTimeout  time.Duration `env:"OPERATOR_SCAN_JOB_TIMEOUT" envDefault:"5m"`
+	Namespace        string        `env:"OPERATOR_NAMESPACE"`
+	TargetNamespaces string        `env:"OPERATOR_TARGET_NAMESPACES"`
+	ServiceAccount   string        `env:"OPERATOR_SERVICE_ACCOUNT" envDefault:"starboard-operator"`
+	ScanJobTimeout   time.Duration `env:"OPERATOR_SCAN_JOB_TIMEOUT" envDefault:"5m"`
 }
 
 type ScannerTrivy struct {
@@ -58,15 +58,15 @@ func (c Config) GetOperatorNamespace() (string, error) {
 
 // GetTargetNamespaces returns namespaces the operator should be watching for changes.
func (c Config) GetTargetNamespaces() ([]string, error) { - namespace := c.Operator.TargetNamespace - if namespace != "" { - return strings.Split(namespace, ","), nil + namespaces := c.Operator.TargetNamespaces + if namespaces != "" { + return strings.Split(namespaces, ","), nil } - return nil, fmt.Errorf("%s must be set", "OPERATOR_TARGET_NAMESPACE") + return nil, fmt.Errorf("%s must be set", "OPERATOR_TARGET_NAMESPACES") } -// ResolveInstallMode resolves install mode defined by Operator Lifecycle Manager. -// We do that for debugging purposes. +// ResolveInstallMode resolves install mode defined by the Operator Lifecycle Manager. +// We do that mainly for debugging and tracing purposes. func ResolveInstallMode(operatorNamespace string, targetNamespaces []string) (string, error) { if len(targetNamespaces) == 1 && operatorNamespace == targetNamespaces[0] { return "OwnNamespace", nil diff --git a/pkg/trivy/scanner.go b/pkg/trivy/scanner.go index bee9c31..e754d90 100644 --- a/pkg/trivy/scanner.go +++ b/pkg/trivy/scanner.go @@ -163,9 +163,10 @@ func (s *trivyScanner) NewScanJob(workload kube.Object, spec corev1.PodSpec, opt Name: jobName, Namespace: options.Namespace, Labels: map[string]string{ - kube.LabelResourceKind: string(workload.Kind), - kube.LabelResourceName: workload.Name, - kube.LabelResourceNamespace: workload.Namespace, + kube.LabelResourceKind: string(workload.Kind), + kube.LabelResourceName: workload.Name, + kube.LabelResourceNamespace: workload.Namespace, + "app.kubernetes.io/managed-by": "starboard-operator", }, Annotations: map[string]string{ kube.AnnotationContainerImages: containerImagesAsJSON, @@ -178,9 +179,10 @@ func (s *trivyScanner) NewScanJob(workload kube.Object, spec corev1.PodSpec, opt Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - kube.LabelResourceKind: string(workload.Kind), - kube.LabelResourceName: workload.Name, - kube.LabelResourceNamespace: workload.Namespace, + kube.LabelResourceKind: string(workload.Kind), + kube.LabelResourceName: workload.Name, + kube.LabelResourceNamespace: workload.Namespace, + "app.kubernetes.io/managed-by": "starboard-operator", }, }, Spec: corev1.PodSpec{