From f8090f7f70331e496ceffc50ae22d903e89b4123 Mon Sep 17 00:00:00 2001
From: KC Flynn <38958974+flynnkc@users.noreply.github.com>
Date: Thu, 25 Sep 2025 14:43:36 -0400
Subject: [PATCH] Add support for Oracle Kubernetes Engine (OKE) CIS Benchmark
 v1.7.0

---
 cfg/config.yaml                    |   6 +
 cfg/oke-1.7.0/config.yaml          |  13 ++
 cfg/oke-1.7.0/controlplane.yaml    |  24 +++
 cfg/oke-1.7.0/managedservices.yaml | 158 +++++++++++++++
 cfg/oke-1.7.0/node.yaml            | 300 +++++++++++++++++++++++++++++
 cfg/oke-1.7.0/policies.yaml        | 220 +++++++++++++++++++++
 job-oke.yaml                       |  48 +++++
 7 files changed, 769 insertions(+)
 create mode 100644 cfg/oke-1.7.0/config.yaml
 create mode 100644 cfg/oke-1.7.0/controlplane.yaml
 create mode 100644 cfg/oke-1.7.0/managedservices.yaml
 create mode 100644 cfg/oke-1.7.0/node.yaml
 create mode 100644 cfg/oke-1.7.0/policies.yaml
 create mode 100644 job-oke.yaml

diff --git a/cfg/config.yaml b/cfg/config.yaml
index f0d97cc4e..90980fc65 100644
--- a/cfg/config.yaml
+++ b/cfg/config.yaml
@@ -315,6 +315,7 @@ version_mapping:
   "rke2-cis-1.7": "rke2-cis-1.7"
   "rke2-cis-1.23": "rke2-cis-1.23"
   "rke2-cis-1.24": "rke2-cis-1.24"
+  "oke-1.7.0": "oke-1.7.0"
 
 target_mapping:
   "cis-1.5":
@@ -549,3 +550,8 @@ target_mapping:
     - "controlplane"
     - "node"
     - "policies"
+  "oke-1.7.0":
+    - "node"
+    - "controlplane"
+    - "policies"
+    - "managedservices"
diff --git a/cfg/oke-1.7.0/config.yaml b/cfg/oke-1.7.0/config.yaml
new file mode 100644
index 000000000..8ea47c452
--- /dev/null
+++ b/cfg/oke-1.7.0/config.yaml
@@ -0,0 +1,13 @@
+---
+## Version-specific settings that override the values in cfg/config.yaml
+
+node:
+  kubelet:
+    confs:
+      - "/etc/kubernetes/kubelet-config.json"
+
+    svc:
+      - "/etc/systemd/system/kubelet.service.d/00-default.conf"
+
+    defaultconf: "/etc/kubernetes/kubelet-config.json"
+    defaultsvc: "/etc/systemd/system/kubelet.service.d/00-default.conf"
\ No newline at end of file
diff --git a/cfg/oke-1.7.0/controlplane.yaml b/cfg/oke-1.7.0/controlplane.yaml
new file mode 100644
index 000000000..7ef493a77
--- /dev/null
+++ b/cfg/oke-1.7.0/controlplane.yaml
@@ -0,0 +1,24 @@
+---
+controls:
+version: "oke-1.7.0"
+id: 2
+text: "Control Plane Configuration"
+type: "controlplane"
+groups:
+  - id: 2.1
+    text: "Authentication and Authorization"
+    checks:
+      - id: 2.1.1
+        text: "Client certificate authentication should not be used for users (Automated)"
+        type: skip
+        remediation: |
+          Alternative mechanisms provided by Kubernetes, such as OIDC, should be implemented in place of client certificates.
+          You can remediate the availability of client certificates in your OKE cluster.
+
+  - id: 2.2
+    text: "Logging"
+    type: "manual"
+    checks:
+      - id: 2.2.1
+        text: "Ensure access to OCI Audit service Log for OKE (Manual)"
+        type: skip
+        remediation: "No remediation is necessary for this control."
\ No newline at end of file
diff --git a/cfg/oke-1.7.0/managedservices.yaml b/cfg/oke-1.7.0/managedservices.yaml
new file mode 100644
index 000000000..121407831
--- /dev/null
+++ b/cfg/oke-1.7.0/managedservices.yaml
@@ -0,0 +1,158 @@
+---
+controls:
+version: "oke-1.7.0"
+id: 5
+text: "Managed services"
+type: "managedservices"
+groups:
+  - id: 5.1
+    text: "Image Registry and Image Scanning"
+    checks:
+      - id: 5.1.1
+        text: "Oracle Cloud Security Penetration and Vulnerability Testing (Manual)"
+        type: "manual"
+        remediation: |
+          As a service administrator, you can run tests for some Oracle Cloud services. Before running the tests, you must first review the Oracle Cloud Testing Policies section.
+          Note:
+          You must have an Oracle Account with the necessary privileges to file service maintenance requests, and you must be signed in to the environment that will be the subject of the penetration and vulnerability testing.
+          Submitting a Cloud Security Testing Notification: https://docs.cloud.oracle.com/en-us/iaas/Content/Security/Concepts/security_testing-policy.htm
+        scored: false
+
+      - id: 5.1.2
+        text: "Minimize user access control to Container Engine for Kubernetes (Manual)"
+        type: "manual"
+        remediation: |
+          By default, users are not assigned any Kubernetes RBAC roles (or clusterroles). So before attempting to create a new role (or clusterrole), you must be assigned an appropriately privileged role (or clusterrole). A number of such roles and clusterroles are always created by default, including the cluster-admin clusterrole (for a full list, see Default Roles and Role Bindings in the Kubernetes documentation). The cluster-admin clusterrole essentially confers super-user privileges. A user granted the cluster-admin clusterrole can perform any operation across all namespaces in a given cluster.
+          Note that Oracle Cloud Infrastructure tenancy administrators already have sufficient privileges, and do not require the cluster-admin clusterrole.
+          See: Granting the Kubernetes RBAC cluster-admin clusterrole (https://docs.oracle.com/en-us/iaas/Content/ContEng/Concepts/contengaboutaccesscontrol.htm)
+        scored: false
+
+      - id: 5.1.3
+        text: "Minimize cluster access to read-only (Manual)"
+        type: "manual"
+        remediation: |
+          To access a cluster using kubectl, you have to set up a Kubernetes configuration file (commonly known as a 'kubeconfig' file) for the cluster. The kubeconfig file (by default named config and stored in the $HOME/.kube directory) provides the necessary details to access the cluster. Having set up the kubeconfig file, you can start using kubectl to manage the cluster.
+
+          The steps to follow when setting up the kubeconfig file depend on how you want to access the cluster:
+          • To access the cluster using kubectl in Cloud Shell, run an Oracle Cloud Infrastructure CLI command in the Cloud Shell window to set up the kubeconfig file.
+          • To access the cluster using a local installation of kubectl:
+            1. Generate an API signing key pair (if you don't already have one).
+            2. Upload the public key of the API signing key pair.
+            3. Install and configure the Oracle Cloud Infrastructure CLI.
+            4. Set up the kubeconfig file.
+          See Setting Up Local Access to Clusters (https://docs.oracle.com/en-us/iaas/Content/ContEng/Tasks/contengdownloadkubeconfigfile.htm#localdownload)
+        scored: false
+
+      - id: 5.1.4
+        text: "Minimize Container Registries to only those approved (Manual)"
+        type: "manual"
+        remediation: |
+          If using Oracle Cloud Infrastructure Container Registry: Utilize OCI IAM policies to control access to the container registry.
+
+          If using a third-party registry: Follow best practices based on vendor recommendations.
+        scored: false
+
+  - id: 5.2
+    text: "Identity and Access Management (IAM)"
+    checks:
+      - id: 5.2.1
+        text: "Prefer using dedicated Service Accounts (Automated)"
+        type: "manual"
+        remediation: |
+          When you create a pod, if you do not specify a service account, it is automatically assigned the default service account in the same namespace. If you get the raw json or yaml for a pod you have created (for example, kubectl get pods/<podname> -o yaml), you can see the spec.serviceAccountName field has been automatically set.
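+
+          An illustrative sketch of a dedicated service account and a pod that uses it (the names app-sa and app-pod are hypothetical):
+
+            apiVersion: v1
+            kind: ServiceAccount
+            metadata:
+              name: app-sa
+            ---
+            apiVersion: v1
+            kind: Pod
+            metadata:
+              name: app-pod
+            spec:
+              serviceAccountName: app-sa
+              containers:
+                - name: app
+                  image: nginx
+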
+          See Configure Service Accounts for Pods (https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/)
+        scored: false
+
+  - id: 5.3
+    text: "Cloud Key Management Service (Cloud KMS)"
+    checks:
+      - id: 5.3.1
+        text: "Encrypting Kubernetes Secrets at Rest in Etcd (Manual)"
+        type: "manual"
+        remediation: |
+          You can create a cluster in one tenancy that uses a master encryption key in a different tenancy. In this case, you have to write cross-tenancy policies to enable the cluster in its tenancy to access the master encryption key in the Vault service's tenancy. Note that if you want to create a cluster and specify a master encryption key that's in a different tenancy, you cannot use the Console to create the cluster.
+          For example, assume the cluster is in the ClusterTenancy, and the master encryption key is in the KeyTenancy. Users belonging to a group (OKEAdminGroup) in the ClusterTenancy have permissions to create clusters. A dynamic group (OKEAdminDynGroup) has been created in the ClusterTenancy, with the rule ALL {resource.type = 'cluster', resource.compartment.id = 'ocid1.compartment.oc1..'}, so all clusters created in the ClusterTenancy belong to the dynamic group.
+          In the root compartment of the KeyTenancy, enter the following policies, which:
+
+          • use the ClusterTenancy's OCID to map ClusterTenancy to the alias OKE_Tenancy
+          • use the OCIDs of OKEAdminGroup and OKEAdminDynGroup to map them to the aliases RemoteOKEAdminGroup and RemoteOKEClusterDynGroup respectively
+          • give RemoteOKEAdminGroup and RemoteOKEClusterDynGroup the ability to list, view, and perform cryptographic operations with a particular master key in the KeyTenancy
+
+          Define tenancy OKE_Tenancy as ocid1.tenancy.oc1..
+          Define dynamic-group RemoteOKEClusterDynGroup as ocid1.dynamicgroup.oc1..
+          Define group RemoteOKEAdminGroup as ocid1.group.oc1..
+          Admit dynamic-group RemoteOKEClusterDynGroup of tenancy ClusterTenancy to use keys in tenancy where target.key.id = 'ocid1.key.oc1..'
+          Admit group RemoteOKEAdminGroup of tenancy ClusterTenancy to use keys in tenancy where target.key.id = 'ocid1.key.oc1..'
+
+          In the root compartment of the ClusterTenancy, enter the following policies, which:
+
+          • use the KeyTenancy's OCID to map KeyTenancy to the alias KMS_Tenancy
+          • give OKEAdminGroup and OKEAdminDynGroup the ability to use master keys in the KeyTenancy
+          • allow OKEAdminDynGroup to use a specific master key obtained from the KeyTenancy in the ClusterTenancy
+
+          Define tenancy KMS_Tenancy as ocid1.tenancy.oc1..
+          Endorse group OKEAdminGroup to use keys in tenancy KMS_Tenancy
+          Endorse dynamic-group OKEAdminDynGroup to use keys in tenancy KMS_Tenancy
+          Allow dynamic-group OKEAdminDynGroup to use keys in tenancy where target.key.id = 'ocid1.key.oc1..'
+
+          See Accessing Object Storage Resources Across Tenancies for more examples of writing cross-tenancy policies.
+          Having entered the policies, you can now run a command similar to the following to create a cluster in the ClusterTenancy that uses the master key obtained from the KeyTenancy:
+
+          oci ce cluster create --name oke-with-cross-kms --kubernetes-version v1.16.8 --vcn-id ocid1.vcn.oc1.iad. --service-lb-subnet-ids '["ocid1.subnet.oc1.iad."]' --compartment-id ocid1.compartment.oc1.. --kms-key-id ocid1.key.oc1.iad.
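+
+          To confirm the key association afterwards, you can inspect the cluster with the OCI CLI (a sketch; the cluster OCID is elided):
+
+          oci ce cluster get --cluster-id ocid1.cluster.oc1..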
+        scored: false
+
+  - id: 5.4
+    text: "Cluster Networking"
+    checks:
+      - id: 5.4.1
+        text: "Restrict Access to the Control Plane Endpoint (Automated)"
+        type: "manual"
+        remediation: |
+          Enable Master Authorized Networks to restrict access to the cluster's control plane (master endpoint) to only an allowlist of authorized IPs.
+        scored: false
+
+      - id: 5.4.2
+        text: "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled (Automated)"
+        type: "manual"
+        remediation: |
+          Disable access to the Kubernetes API from outside the node network if it is not required.
+        scored: false
+
+      - id: 5.4.3
+        text: "Ensure clusters are created with Private Nodes (Automated)"
+        type: "manual"
+        remediation: |
+          Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.
+        scored: false
+
+      - id: 5.4.4
+        text: "Ensure Network Policy is Enabled and set as appropriate (Automated)"
+        type: "manual"
+        remediation: |
+          Configure Network Policy for the cluster.
+        scored: false
+
+      - id: 5.4.5
+        text: "Encrypt traffic to HTTPS load balancers with TLS certificates (Manual)"
+        type: "manual"
+        remediation: |
+          Your load balancer vendor can provide details on configuring HTTPS with TLS.
+        scored: false
+
+  - id: 5.5
+    text: "Authentication and Authorization"
+    checks:
+      - id: 5.5.1
+        text: "Access Control and Container Engine for Kubernetes (Manual)"
+        type: "manual"
+        remediation: |
+          Example: Granting the Kubernetes RBAC cluster-admin clusterrole
+
+          Follow these steps to grant a user who is not a tenancy administrator the Kubernetes RBAC cluster-admin clusterrole on a cluster deployed on Oracle Cloud Infrastructure:
+          1. If you haven't already done so, follow the steps to set up the cluster's kubeconfig configuration file and (if necessary) set the KUBECONFIG environment variable to point to the file. Note that you must set up your own kubeconfig file. You cannot access a cluster using a kubeconfig file that a different user set up. See Setting Up Cluster Access (https://docs.oracle.com/en-us/iaas/Content/ContEng/Tasks/contengdownloadkubeconfigfile.htm#Setting_Up_Cluster_Access).
+          2. In a terminal window, grant the Kubernetes RBAC cluster-admin clusterrole to the user by entering:
+
+             $ kubectl create clusterrolebinding <binding-name> --clusterrole=cluster-admin --user=<user-OCID>
+
+             where:
+             • <binding-name> is a string of your choice to be used as the name for the binding between the user and the Kubernetes RBAC cluster-admin clusterrole. For example, jdoe_clst_adm
+             • <user-OCID> is the user's OCID (obtained from the Console). For example, ocid1.user.oc1..aaaaa...zutq (abbreviated for readability).
+             For example:
+             $ kubectl create clusterrolebinding jdoe_clst_adm --clusterrole=cluster-admin --user=ocid1.user.oc1..aaaaa...zutq
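+
+             To verify the binding afterwards (a sketch reusing the example name above):
+             $ kubectl get clusterrolebinding jdoe_clst_adm -o yaml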
\ No newline at end of file
diff --git a/cfg/oke-1.7.0/node.yaml b/cfg/oke-1.7.0/node.yaml
new file mode 100644
index 000000000..b63fbf73e
--- /dev/null
+++ b/cfg/oke-1.7.0/node.yaml
@@ -0,0 +1,300 @@
+---
+controls:
+version: "oke-1.7.0"
+id: 3
+text: "Worker Nodes"
+type: "node"
+groups:
+  - id: 3.1
+    text: "Worker Node Configuration Files"
+    checks:
+      - id: 3.1.1
+        text: "Ensure that the $kubeletkubeconfig file permissions are set to 644 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node. For example,
+
+          chmod 644 $kubeletkubeconfig
+        scored: true
+
+      - id: 3.1.2
+        text: "Ensure that the $kubeletkubeconfig file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; else echo \"File not found\"; fi'' '
+        tests:
+          bin_op: or
+          test_items:
+            - flag: root:root
+            - flag: "File not found"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node. For example,
+
+          chown root:root $kubeletkubeconfig
+        scored: true
+
+      - id: 3.1.3
+        text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step):
+
+          chmod 644 $kubeletconf
+        scored: true
+
+      - id: 3.1.4
+        text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; else echo \"File not found\"; fi'' '
+        tests:
+          bin_op: or
+          test_items:
+            - flag: root:root
+            - flag: "File not found"
+        remediation: |
+          Run the below command (using the config file location identified in the Audit step) on each worker node. For example,
+
+          chown root:root $kubeletconf
+        scored: true
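+
+      # A quick manual spot-check of both permissions and ownership on a node, using
+      # the kubelet config path from this benchmark's config (a sketch, not a scored check):
+      #   stat -c '%a %U:%G' /etc/kubernetes/kubelet-config.json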
+
+  - id: 3.2
+    text: "Kubelet"
+    checks:
+      - id: 3.2.1
+        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: "--anonymous-auth"
+              path: '{.authentication.anonymous.enabled}'
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          If modifying the Kubelet service config file, edit the kubelet.service file $kubeletsvc and set the below parameter
+
+          --anonymous-auth=false
+
+          Based on your system, restart the kubelet service and check status
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          systemctl status kubelet -l
+        scored: false
+
+      - id: 3.2.2
+        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--authorization-mode"
+              path: '{.authorization.mode}'
+              compare:
+                op: noteq
+                value: "AlwaysAllow"
+            - path: '{.authentication.webhook.enabled}'
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          If modifying the Kubelet service config file, edit the kubelet.service file $kubeletsvc and set the below parameter
+
+          --authorization-mode=Webhook
+
+          Based on your system, restart the kubelet service and check status
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          systemctl status kubelet -l
+        scored: true
+
+      - id: 3.2.3
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: "--client-ca-file"
+              path: '{.authentication.x509.clientCAFile}'
+        remediation: |
+          If modifying the Kubelet service config file, edit the kubelet.service file $kubeletsvc and set the below parameter
+
+          --client-ca-file=/etc/kubernetes/ca.crt
+
+          Based on your system, restart the kubelet service and check status
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          systemctl status kubelet -l
+        scored: true
+
+      - id: 3.2.4
+        text: "Ensure that the --read-only-port argument is set to 0 (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: "--read-only-port"
+              path: '{.readOnlyPort}'
+              compare:
+                op: eq
+                value: "0"
+        remediation: |
+          If modifying the Kubelet service config file, edit the kubelet.service file $kubeletsvc and set the below parameter
+
+          --read-only-port=0
+
+          Based on your system, restart the kubelet service and check status
+
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          systemctl status kubelet -l
+        scored: true
+
+      - id: 3.2.5
+        text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: "--streaming-connection-idle-timeout"
+              path: '{.streamingConnectionIdleTimeout}'
+              compare:
+                op: noteq
+                value: "0"
+        remediation: |
+          If modifying the Kubelet service config file, edit the kubelet.service file $kubeletsvc and set the below parameter to a non-zero value. For example:
+
+          --streaming-connection-idle-timeout=5m
+
+          Based on your system, restart the kubelet service and check status
+
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          systemctl status kubelet -l
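+
+          If you manage the setting through the kubelet config file ($kubeletconf) instead, the equivalent field is, for example:
+
+            "streamingConnectionIdleTimeout": "4h0m0s"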
"/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--make-iptables-util-chains" + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: "true" + remediation: | + If modifying the Kubelet service config file, edit the kubelet.service file $kubeletsvc and set the below parameter + + --make-iptables-util-chains:true + + Based on your system, restart the kubelet service and check status + + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: false + + - id: 3.2.7 + text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--event-qps" + path: '{.eventRecordQPS}' + compare: + op: eq + value: "0" + remediation: | + If modifying the Kubelet service config file, edit the kubelet.service file $kubeletsvc and set the below parameter + + --event-qps=0 + + Based on your system, restart the kubelet service and check status + + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: true + + - id: 3.2.8 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + path: '{.tlsCertFile}' + - flag: "--tls-private-key-file" + path: '{.tlsPrivateKeyFile}' + remediation: | + If modifying the Kubelet service config file, edit the kubelet.service file $kubeletsvc and set the below parameter + + Verify that the `tls-cert-file=/var/lib/kubelet/pki/tls.pem`. + Verify that the `tls-private-key-file=/var/lib/kubelet/pki/tls.key`. + + Based on your system, restart the kubelet service and check status + + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: true + + - id: 3.2.9 + text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--rotate-certificates" + path: '{.rotateCertificates}' + compare: + op: noteq + value: false + remediation: | + If modifying the Kubelet service config file, edit the kubelet.service file $kubeletsvc and set the below parameter + + Verify that the `--rotate-certificates` is present. 
+
+          Based on your system, restart the kubelet service and check status
+
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          systemctl status kubelet -l
+        scored: true
+
+      - id: 3.2.10
+        text: "Ensure that the --rotate-server-certificates argument is set to true (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: "--rotate-server-certificates"
+              path: '{.featureGates.RotateKubeletServerCertificate}'
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          If modifying the Kubelet service config file, edit the kubelet.service file $kubeletsvc and set the below parameter
+
+          --rotate-server-certificates=true
+
+          Based on your system, restart the kubelet service and check status
+
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          systemctl status kubelet -l
+        scored: true
diff --git a/cfg/oke-1.7.0/policies.yaml b/cfg/oke-1.7.0/policies.yaml
new file mode 100644
index 000000000..6a541eb60
--- /dev/null
+++ b/cfg/oke-1.7.0/policies.yaml
@@ -0,0 +1,220 @@
+---
+controls:
+version: "oke-1.7.0"
+id: 4
+text: "Policies"
+type: "policies"
+groups:
+  - id: 4.1
+    text: "RBAC and Service Accounts"
+    checks:
+      - id: 4.1.1
+        text: "Ensure that the cluster-admin role is only used where required (Automated)"
+        type: "manual"
+        remediation: |
+          Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges. Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role:
+
+          kubectl delete clusterrolebinding [name]
+        scored: false
+
+      - id: 4.1.2
+        text: "Minimize access to secrets (Automated)"
+        type: "manual"
+        remediation: |
+          Where possible, remove get, list and watch access to secret objects in the cluster.
+        scored: false
+
+      - id: 4.1.3
+        text: "Minimize wildcard use in Roles and ClusterRoles (Automated)"
+        type: "manual"
+        remediation: |
+          Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.
+        scored: false
+
+      - id: 4.1.4
+        text: "Minimize access to create pods (Automated)"
+        type: "manual"
+        remediation: |
+          Where possible, remove create access to pod objects in the cluster.
+        scored: false
+
+      - id: 4.1.5
+        text: "Ensure that default service accounts are not actively used (Automated)"
+        type: "manual"
+        remediation: |
+          Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.
+          Modify the configuration of each default service account to include this value
+
+          automountServiceAccountToken: false
+
+          Automatic remediation for the default account:
+
+          kubectl patch serviceaccount default -p $'automountServiceAccountToken: false'
+        scored: false
+
+      - id: 4.1.6
+        text: "Ensure that Service Account Tokens are only mounted where necessary (Automated)"
+        type: "manual"
+        remediation: |
+          Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.
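+
+          An illustrative sketch of opting a pod out of token mounting (the pod name is hypothetical):
+
+            apiVersion: v1
+            kind: Pod
+            metadata:
+              name: app-pod
+            spec:
+              automountServiceAccountToken: false
+              containers:
+                - name: app
+                  image: nginx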
+        scored: false
+
+  - id: 4.2
+    text: "Pod Security Policies"
+    checks:
+      - id: 4.2.1
+        text: "Minimize the admission of privileged containers (Automated)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the admission of privileged containers.
+          To enable PSA for a namespace in your cluster, set the pod-security.kubernetes.io/enforce label with the policy value you want to enforce.
+
+          kubectl label --overwrite ns NAMESPACE pod-security.kubernetes.io/enforce=restricted
+
+          The above command enforces the restricted policy for the NAMESPACE namespace. You can also enable Pod Security Admission for all your namespaces. For example:
+
+          kubectl label --overwrite ns --all pod-security.kubernetes.io/warn=baseline
+        scored: false
+
+      - id: 4.2.2
+        text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the admission of hostPID containers.
+        scored: false
+
+      - id: 4.2.3
+        text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the admission of hostIPC containers.
+        scored: false
+
+      - id: 4.2.4
+        text: "Minimize the admission of containers wishing to share the host network namespace (Automated)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the admission of hostNetwork containers.
+        scored: false
+
+      - id: 4.2.5
+        text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with .spec.allowPrivilegeEscalation set to true.
+        scored: false
+
+  - id: 4.3
+    text: "CNI Plugin"
+    checks:
+      - id: 4.3.1
+        text: "Ensure latest CNI version is used (Automated)"
+        type: "manual"
+        remediation: |
+          As with RBAC policies, network policies should adhere to the policy of least privileged access. Start by creating a deny all policy that restricts all inbound and outbound traffic from a namespace or create a global policy using Calico.
+        scored: false
+
+      - id: 4.3.2
+        text: "Ensure that all Namespaces have Network Policies defined (Automated)"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create NetworkPolicy objects as you need them. Clusters you create with Container Engine for Kubernetes have flannel installed as the default CNI network provider. flannel is a simple overlay virtual network that satisfies the requirements of the Kubernetes networking model by attaching IP addresses to containers.
+          Although flannel satisfies the requirements of the Kubernetes networking model, it does not support NetworkPolicy resources. If you want to enhance the security of clusters you create with Container Engine for Kubernetes by implementing network policies, you have to install and configure a network provider that does support NetworkPolicy resources. One such provider is Calico (refer to the Kubernetes documentation for a list of other network providers). Calico is an open source networking and network security solution for containers, virtual machines, and native host-based workloads.
+          Use the Calico open-source software in conjunction with flannel. Calico Enterprise does not support flannel.
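+
+          A minimal default-deny sketch to apply in each namespace (the namespace name my-namespace is illustrative):
+
+            apiVersion: networking.k8s.io/v1
+            kind: NetworkPolicy
+            metadata:
+              name: default-deny-all
+              namespace: my-namespace
+            spec:
+              podSelector: {}
+              policyTypes:
+                - Ingress
+                - Egress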
+        scored: false
+
+  - id: 4.4
+    text: "Secrets Management"
+    checks:
+      - id: 4.4.1
+        text: "Prefer using secrets as files over secrets as environment variables (Automated)"
+        type: "manual"
+        remediation: |
+          If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.
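+
+          An illustrative sketch of mounting a secret as a file (the names app-pod and app-secret are hypothetical):
+
+            apiVersion: v1
+            kind: Pod
+            metadata:
+              name: app-pod
+            spec:
+              containers:
+                - name: app
+                  image: nginx
+                  volumeMounts:
+                    - name: creds
+                      mountPath: /etc/creds
+                      readOnly: true
+              volumes:
+                - name: creds
+                  secret:
+                    secretName: app-secret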
+        scored: false
+
+      - id: 4.4.2
+        text: "Consider external secret storage (Manual)"
+        type: "manual"
+        remediation: |
+          Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.
+          The master nodes in a Kubernetes cluster store sensitive configuration data (such as authentication tokens, passwords, and SSH keys) as Kubernetes secret objects in etcd. Etcd is an open source distributed key-value store that Kubernetes uses for cluster coordination and state management. In the Kubernetes clusters created by Container Engine for Kubernetes, etcd writes and reads data to and from block storage volumes in the Oracle Cloud Infrastructure Block Volume service. Although the data in block storage volumes is encrypted, Kubernetes secrets at rest in etcd itself are not encrypted by default.
+          For additional security, when you create a new cluster you can specify that Kubernetes secrets at rest in etcd are to be encrypted using the Oracle Cloud Infrastructure Vault service.
+        scored: false
+
+  - id: 4.5
+    text: "General Policies"
+    checks:
+      - id: 4.5.1
+        text: "Create administrative boundaries between resources using namespaces (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create namespaces for objects in your deployment as you need them.
+        scored: false
+
+      - id: 4.5.2
+        text: "Apply Security Context to Your Pods and Containers (Manual)"
+        type: "manual"
+        remediation: |
+          As a best practice we recommend that you scope the binding for privileged pods to service accounts within a particular namespace, e.g. kube-system, and limit access to that namespace. For all other serviceaccounts/namespaces, we recommend implementing a more restrictive policy such as this:
+
+          apiVersion: policy/v1beta1
+          kind: PodSecurityPolicy
+          metadata:
+            name: restricted
+            annotations:
+              seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'
+              apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
+              seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
+              apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+          spec:
+            privileged: false
+            # Required to prevent escalations to root.
+            allowPrivilegeEscalation: false
+            # This is redundant with non-root + disallow privilege escalation,
+            # but we can provide it for defense in depth.
+            requiredDropCapabilities:
+              - ALL
+            # Allow core volume types.
+            volumes:
+              - 'configMap'
+              - 'emptyDir'
+              - 'projected'
+              - 'secret'
+              - 'downwardAPI'
+              # Assume that persistentVolumes set up by the cluster admin are safe to use.
+              - 'persistentVolumeClaim'
+            hostNetwork: false
+            hostIPC: false
+            hostPID: false
+            runAsUser:
+              # Require the container to run without root privileges.
+              rule: 'MustRunAsNonRoot'
+            seLinux:
+              # This policy assumes the nodes are using AppArmor rather than SELinux.
+              rule: 'RunAsAny'
+            supplementalGroups:
+              rule: 'MustRunAs'
+              ranges:
+                # Forbid adding the root group.
+                - min: 1
+                  max: 65535
+            fsGroup:
+              rule: 'MustRunAs'
+              ranges:
+                # Forbid adding the root group.
+                - min: 1
+                  max: 65535
+            readOnlyRootFilesystem: false
+
+          This policy prevents pods from running as privileged or escalating privileges. It also restricts the types of volumes that can be mounted and the root supplemental groups that can be added.
+          Another, albeit similar, approach is to start with a policy that locks everything down and incrementally add exceptions for applications that need looser restrictions such as logging agents which need the ability to mount a host path.
+        scored: false
+
+      - id: 4.5.3
+        text: "The default namespace should not be used (Automated)"
+        type: "manual"
+        remediation: |
+          Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.
+        scored: false
diff --git a/job-oke.yaml b/job-oke.yaml
new file mode 100644
index 000000000..aac784e4d
--- /dev/null
+++ b/job-oke.yaml
@@ -0,0 +1,48 @@
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: kube-bench
+spec:
+  template:
+    spec:
+      hostPID: true
+      containers:
+        - name: kube-bench
+          image: docker.io/aquasec/kube-bench:latest
+          command:
+            [
+              "kube-bench",
+              "run",
+              "--targets",
+              "node,controlplane,policies,managedservices",
+              "--benchmark",
+              "oke-1.7.0",
+            ]
+          volumeMounts:
+            - name: var-lib-kubelet
+              mountPath: /var/lib/kubelet
+              readOnly: true
+            - name: etc-systemd
+              mountPath: /etc/systemd
+              readOnly: true
+            - name: etc-kubernetes
+              mountPath: /etc/kubernetes
+              readOnly: true
+            - name: home-kubernetes
+              mountPath: /home/kubernetes
+              readOnly: true
+      restartPolicy: Never
+      volumes:
+        - name: var-lib-kubelet
+          hostPath:
+            path: "/var/lib/kubelet"
+        - name: etc-systemd
+          hostPath:
+            path: "/etc/systemd"
+        - name: etc-kubernetes
+          hostPath:
+            path: "/etc/kubernetes"
+        - name: home-kubernetes
+          hostPath:
+            path: "/home/kubernetes"
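+# Example usage (a sketch; assumes kubectl access to the target OKE cluster):
+#   kubectl apply -f job-oke.yaml
+#   kubectl logs job/kube-bench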