+ )
+}
+
+
+
+
+
+
diff --git a/docs/features/_category_.json b/docs/features/_category_.json
new file mode 100644
index 000000000..5299b8d4f
--- /dev/null
+++ b/docs/features/_category_.json
@@ -0,0 +1,8 @@
+{
+ "label": "Features",
+ "position": 3,
+ "collapsed": false,
+ "customProps": {
+ "image": "/img/icons/aws.png"
+ }
+}
diff --git a/docs/features/aws-iam/_category_.json b/docs/features/aws-iam/_category_.json
new file mode 100644
index 000000000..793b7a8c4
--- /dev/null
+++ b/docs/features/aws-iam/_category_.json
@@ -0,0 +1,8 @@
+{
+ "label": "AWS IAM",
+ "position": 2,
+ "collapsed": true,
+ "customProps": {
+ "image": "/img/icons/aws.png"
+ }
+}
diff --git a/docs/features/aws-iam/index.mdx b/docs/features/aws-iam/index.mdx
new file mode 100644
index 000000000..a40a74cca
--- /dev/null
+++ b/docs/features/aws-iam/index.mdx
@@ -0,0 +1,59 @@
+---
+sidebar_position: 1
+title: AWS IAM | Overview
+hide_table_of_contents: true
+hide_title: true
+---
+
+import DocsLinkCard from "@site/src/components/LinkCard";
+
+export const tutorials = [
+ {
+ title: 'Automate AWS IAM for EKS',
+ description: 'Create just-in-time AWS IAM roles and policies that are kept in sync with your workloads',
+ url: 'aws-iam/tutorials/aws-iam-eks'
+ },
+];
+
+
+# AWS IAM
+
+Otterize can create just-in-time AWS IAM roles and policies for your workloads running on EKS Kubernetes clusters, greatly simplifying the lifecycle of managing IAM roles and policies.
+
+### Tutorials
+
+To learn how to use the Intents Operator and Credentials Operator to manage just-in-time AWS IAM access, check out the tutorial.
+
+
+
+### How does Otterize work with AWS IAM?
+
+1. First, the EKS cluster must have [Otterize installed](/overview/installation).
+2. To have a role created for a pod, label the pod with `credentials-operator.otterize.com/create-aws-role: "true"`
+3. The credentials operator will create a role and an `AssumeRolePolicy` (trust relationship) bound to the pod's ServiceAccount. The ServiceAccount will be annotated automatically.
+4. At this point, the pod is able to assume the role, but the role does not have the ability to perform any actions. We will need to create a ClientIntents YAML for the access the service requires and apply it to our cluster. Below is an example of a ClientIntents file for accessing an S3 bucket. View the [reference](/features/aws-iam/reference) to learn more about the AWS IAM ClientIntents syntax.
+5. Once the intent is applied, the intents operator will create a new policy, which will be attached to the service’s role with the appropriate access.
+6. Done!
+
+```yaml
+apiVersion: k8s.otterize.com/v1alpha3
+kind: ClientIntents
+metadata:
+ name: server
+spec:
+ service:
+ name: server
+ calls:
+ - name: arn:aws:s3:::example-bucket-*/*
+ type: aws
+ awsActions:
+ - "s3:PutObject"
+ - "s3:GetObject"
+```
+
+### Automatically generating ClientIntents for AWS IAM
+
+Figuring out which access you need for AWS can be a painful trial-and-error process, and something you _must_ do if you're tightening production access.
+
+Otterize is getting ready to release support for using existing traffic to generate least-privilege IAM policies. Keen to try this out as part of early access? Sign up to the [Early Access Beta Program](https://otterize.com/EarlyAccessBetaProgram) and we'll be in touch!
+
diff --git a/docs/features/aws-iam/reference.mdx b/docs/features/aws-iam/reference.mdx
new file mode 100644
index 000000000..fe0517232
--- /dev/null
+++ b/docs/features/aws-iam/reference.mdx
@@ -0,0 +1,47 @@
+---
+sidebar_position: 3
+title: Reference
+---
+
+### ClientIntents example (YAML)
+
+```yaml
+apiVersion: k8s.otterize.com/v1alpha3
+kind: ClientIntents
+metadata:
+ name: server
+spec:
+ service:
+ # The name of the pod that will be granted access
+ name: server
+ calls:
+ # The AWS ARN or ARN wildcard that references the resource(s) for the authorization
+ - name: arn:aws:s3:::example-bucket-*/*
+ type: aws
+ # one or more AWS Actions or Action wildcards that will be provided to the specified resources
+ awsActions:
+ - "s3:PutObject"
+ - "s3:GetObject"
+ # Multiple call definitions can be defined for a single service.
+ - name: arn:aws:s3:::read-only-bucket-*/*
+ type: aws
+ awsActions:
+ - "s3:GetObject"
+```
+
+### Annotations
+
+| Key | Description | Default |
+|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|
+| `credentials-operator.otterize.com/create-aws-role` | By setting to **true**, the credentials operator will create a unique AWS Role for the associated pod | `false` |
+
+
+### Helm Chart options
+
+| Key | Description | Default |
+|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|
+| `global.aws.enabled` | Enable or disable AWS integration | `false` |
+| `global.aws.eksClusterNameOverride` | EKS cluster name (overrides auto-detection) | `(none)` |
+| `aws.roleARN` | ARN of the AWS role the operator will use to access AWS. By default, Otterize will create a unique role for each service and annotate the service with the role's ARN. | `(none)` |
+
+View the [Helm chart reference](/reference/configuration/otterize-chart) for all other options
\ No newline at end of file
diff --git a/docs/quickstart/visualization/_category_.json b/docs/features/aws-iam/tutorials/_category_.json
similarity index 60%
rename from docs/quickstart/visualization/_category_.json
rename to docs/features/aws-iam/tutorials/_category_.json
index 4564c5be8..bdfe77bf2 100644
--- a/docs/quickstart/visualization/_category_.json
+++ b/docs/features/aws-iam/tutorials/_category_.json
@@ -1,5 +1,5 @@
{
- "label": "Visualization",
+ "label": "Tutorials",
"position": 2,
"collapsed": false
}
diff --git a/docs/quickstart/access-control/aws-iam-eks.mdx b/docs/features/aws-iam/tutorials/aws-iam-eks.mdx
similarity index 98%
rename from docs/quickstart/access-control/aws-iam-eks.mdx
rename to docs/features/aws-iam/tutorials/aws-iam-eks.mdx
index 372a54184..067b9f66d 100644
--- a/docs/quickstart/access-control/aws-iam-eks.mdx
+++ b/docs/features/aws-iam/tutorials/aws-iam-eks.mdx
@@ -18,7 +18,7 @@ In this tutorial, we will:
## Prerequisites
Already have Otterize deployed with the IAM integration configured on your cluster? [Skip to the tutorial.](#tutorial)
-#### 1. Create an AWS EKS cluster
+### 1. Create an AWS EKS cluster
Before you start, you'll need an AWS EKS cluster. Any cluster will do; there are no special requirements or setup.
@@ -83,7 +83,7 @@ Don't forget to configure your kubeconfig for your cluster. If using the example
aws eks update-kubeconfig --region us-west-2 --name otterize-iam-eks-tutorial
```
-#### 2. Deploy Otterize for AWS IAM
+### 2. Deploy Otterize for AWS IAM
To deploy Otterize, head over to [Otterize Cloud](https://app.otterize.com) and:
1. Create a Kubernetes cluster on the [Integrations page](https://app.otterize.com/integrations), and follow the instructions. *Make sure to enable enforcement mode for this tutorial.* If you already have a Kubernetes cluster connected, skip this step.
@@ -185,7 +185,7 @@ kubectl patch deployment -n otterize-tutorial-iam server --type='json' -p="[{\"o
Expand to see the deployment YAML
```yaml
-{@include: ../../../static/code-examples/aws-iam-eks/client-and-server.yaml}
+{@include: ../../../../static/code-examples/aws-iam-eks/client-and-server.yaml}
```
@@ -285,7 +285,7 @@ By annotating the pod, we've created an IAM role. We now need to specify what we
We will specify the following ClientIntents, granting the PutObject permission to the `otterize-tutorial-bucket` S3 bucket.
```yaml
-{@include: ../../../static/code-examples/aws-iam-eks/clientintents.yaml}
+{@include: ../../../../static/code-examples/aws-iam-eks/clientintents.yaml}
```
To apply these intents, run the following command:
@@ -322,7 +322,7 @@ aws s3 ls $BUCKET_NAME
2023-11-17 20:42:55 19 testfile.3.txt
```
-## What's next?
+### What's next?
Try out some of the other quick tutorials to learn about how to use ClientIntents to manage network policies, Istio policies, PostgreSQL access, and more. You can use a single ClientIntents resource to specify all the access required for a pod.
diff --git a/docs/features/istio/_category_.json b/docs/features/istio/_category_.json
new file mode 100644
index 000000000..5807d8821
--- /dev/null
+++ b/docs/features/istio/_category_.json
@@ -0,0 +1,8 @@
+{
+ "label": "Istio",
+ "position": 4,
+ "collapsed": true,
+ "customProps": {
+ "image": "/img/icons/istio-no-word-mark.svg"
+ }
+}
diff --git a/docs/features/istio/index.mdx b/docs/features/istio/index.mdx
new file mode 100644
index 000000000..3d4d0ccfb
--- /dev/null
+++ b/docs/features/istio/index.mdx
@@ -0,0 +1,96 @@
+---
+sidebar_position: 1
+title: Istio | Overview
+hide_title: true
+---
+
+import DocsLinkCard from "@site/src/components/LinkCard";
+
+export const istio_tutorials = [
+ {
+ title: 'Istio AuthorizationPolicy automation',
+ description: 'Generate AuthorizationPolicy docs from existing connections',
+ url: 'istio/tutorials/k8s-istio-authorization-policies'
+ },
+ {
+ title: 'Istio HTTP-level access mapping',
+ description: 'Map access between services including HTTP paths and methods',
+ url: 'istio/tutorials/k8s-istio-watcher'
+ }
+];
+
+
+
+# Istio
+
+Otterize can build a map of your cluster based on Istio Envoy metrics, and enforce access between services using Istio Authorization Policies.
+
+### Tutorials
+
+To learn how to use the Intents Operator to enforce access using Istio authorization policies, or map your cluster, try one of these quickstart tutorials.
+
+
+
+
+### How does Otterize work with Istio?
+
+1. First, the cluster must have [Otterize installed](/overview/installation).
+2. To have Otterize generate Istio authorization policies, declare and apply ([IBAC](/overview/intent-based-access-control)) ClientIntents for your services.
+Once you do so, Otterize will generate an Istio authorization policy allowing access from the client service, identified by its ServiceAccount, to the server, identified by its labels.
+The HTTP Resources section in the ClientIntents is optional: if you do not specify it, all pod-to-pod access is allowed.
+
+```yaml
+
+apiVersion: k8s.otterize.com/v1alpha3
+kind: ClientIntents
+metadata:
+ name: client
+ namespace: otterize-tutorial-istio
+spec:
+ service:
+ name: client
+ calls:
+ - name: server-abc
+ type: http
+ HTTPResources:
+ - path: /client-path
+ methods: [ GET ]
+
+```
+
+3. If you would like Otterize to be able to autogenerate ClientIntents and map your network at the HTTP path and method level, we need to enable connection telemetry data within Istio. This can be enabled with the following YAML:
+
+```yaml
+apiVersion: telemetry.istio.io/v1alpha1
+kind: Telemetry
+metadata:
+ name: mesh-default
+ namespace: istio-system
+spec:
+ # Configure access logging for Istio services
+ accessLogging:
+ # Define the access logging provider, in this case, Envoy
+ - providers:
+ - name: envoy
+ metrics:
+ # Configure metrics collection for Istio services using Prometheus
+ - providers:
+ - name: prometheus
+ # Customize metric tag overrides
+ overrides:
+ - tagOverrides:
+ # Map the "request_method" metric tag to "request.method" value
+ request_method:
+ value: request.method
+ # Map the "request_path" metric tag to "request.path" value
+ request_path:
+ value: request.path
+```
+
+Or, as a ready-to-paste command:
+```
+kubectl apply -f ${ABSOLUTE_URL}/code-examples/network-mapper/istio-telemetry-enablement.yaml -n istio-system
+```
+
+Once installed, Otterize will query Envoy sidecars for known connections and build an in-memory map of the relationships. After the map is built you can then view those relationships.
+
diff --git a/docs/features/istio/reference.mdx b/docs/features/istio/reference.mdx
new file mode 100644
index 000000000..edf4247bb
--- /dev/null
+++ b/docs/features/istio/reference.mdx
@@ -0,0 +1,38 @@
+---
+sidebar_position: 3
+title: Reference
+---
+
+### ClientIntents example (YAML)
+
+```yaml
+apiVersion: k8s.otterize.com/v1alpha3
+kind: ClientIntents
+metadata:
+ name: client
+ namespace: otterize-tutorial-istio
+spec:
+ service:
+ name: client
+ calls:
+ - name: nginx
+ type: http
+ HTTPResources:
+ - path: /client-path
+ methods: [ GET ]
+```
+
+
+### Helm Chart options
+
+| Key | Description | Default |
+|---------------------------------|-----------------------------------------|--------------------------------|
+| `istiowatcher.enable` | Enable Istio watcher deployment (beta). | `false` |
+| `istiowatcher.image.repository` | Istio watcher image repository. | `otterize` |
+| `istiowatcher.image.image` | Istio watcher image. | `network-mapper-istio-watcher` |
+| `istiowatcher.image.tag` | Istio watcher image tag. | `latest` |
+| `istiowatcher.pullPolicy` | Istio watcher pull policy. | `(none)` |
+| `istiowatcher.pullSecrets` | Istio watcher pull secrets. | `(none)` |
+| `istiowatcher.resources` | Resources override. | `(none)` |
+
+View the [Helm chart reference](/reference/configuration/otterize-chart) for all other options
\ No newline at end of file
diff --git a/docs/features/istio/tutorials/_category_.json b/docs/features/istio/tutorials/_category_.json
new file mode 100644
index 000000000..bdfe77bf2
--- /dev/null
+++ b/docs/features/istio/tutorials/_category_.json
@@ -0,0 +1,5 @@
+{
+ "label": "Tutorials",
+ "position": 2,
+ "collapsed": false
+}
diff --git a/docs/quickstart/access-control/k8s-istio-authorization-policies.mdx b/docs/features/istio/tutorials/k8s-istio-authorization-policies.mdx
similarity index 84%
rename from docs/quickstart/access-control/k8s-istio-authorization-policies.mdx
rename to docs/features/istio/tutorials/k8s-istio-authorization-policies.mdx
index 41582428c..11f392bf9 100644
--- a/docs/quickstart/access-control/k8s-istio-authorization-policies.mdx
+++ b/docs/features/istio/tutorials/k8s-istio-authorization-policies.mdx
@@ -24,49 +24,19 @@ In this tutorial, we will:
## Prerequisites
-
-Prepare a Kubernetes cluster
-
-Before you start, you'll need a Kubernetes cluster. Having a cluster with a [CNI](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) that supports [NetworkPolicies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) isn't required for this tutorial, but is recommended so that your cluster works with other tutorials.
-
-{@include: ../../_common/cluster-setup.md}
-
-
-
-You can now install (or reinstall) Otterize in your cluster, and optionally connect to Otterize Cloud. Connecting to Cloud lets you:
+Already have Otterize deployed with Istio configured on your cluster? Skip to the [tutorial](#tutorial).
-1. See what's happening visually in your browser, through the "access graph";
+### 1. Deploy Otterize
+If you do not have a cluster, we will need to prepare one with [network policy support](/overview/installation#create-a-cluster-with-support-for-network-policies).
-So either forego browser visualization and:
-
-
-Install Otterize in your cluster, without Otterize Cloud
-
-{@include: ../../_common/install-otterize-istio-enabled.md}
-
-
+To deploy Otterize, head over to [Otterize Cloud](https://app.otterize.com) and associate a Kubernetes cluster on the [Clusters page](https://app.otterize.com/clusters), and follow the instructions. If you already have a Kubernetes cluster connected, skip this step.
-Or choose to include browser visualization and:
-
-
-Install Otterize in your cluster, with Otterize Cloud
-
-#### Create an Otterize Cloud account
-
-{@include: ../../_common/create-account.md}
-
-#### Install Otterize OSS, connected to Otterize Cloud
-
-{@include: ../../_common/install-otterize-from-cloud-with-istio-enforcement.md}
-
-
-
-## Install and configure Istio
+### 2. Install and configure Istio
Install Istio in the cluster via Helm
-{@include: ../../_common/install-istio.md}
+{@include: ../../../_common/install-istio.md}
@@ -79,7 +49,7 @@ kubectl apply -f ${ABSOLUTE_URL}/code-examples/network-mapper/istio-telemetry-en
```
```yaml
-{@include: ../../../static/code-examples/network-mapper/istio-telemetry-enablement.yaml}
+{@include: ../../../../static/code-examples/network-mapper/istio-telemetry-enablement.yaml}
```
@@ -91,7 +61,9 @@ e.g. in the access graph, but we can also use that information to automatically
enforce them with Istio authorization policies.
:::
-## Deploy the two clients and the server
+## Tutorial
+
+### Deploy the two clients and the server
Deploy a simple example consisting of `client` and `other-client` calling `nginx` over HTTP:
@@ -99,7 +71,7 @@ Deploy a simple example consisting of `client` and `other-client` calling `nginx
kubectl apply -n otterize-tutorial-istio -f ${ABSOLUTE_URL}/code-examples/istio-authorization-policies/all.yaml
```
-## Apply intents
+### Apply intents
We will now declare that the **client** intends to call the **server** at a particular HTTP path using a specific HTTP method.
@@ -116,7 +88,7 @@ You can click on the services or the lines connecting them to see which ClientIn
1. Here is the `intents.yaml` declaration of the client, which we will apply below:
```yaml
-{@include: ../../../static/code-examples/istio-authorization-policies/intents.yaml}
+{@include: ../../../../static/code-examples/istio-authorization-policies/intents.yaml}
```
To apply it, use:
@@ -195,7 +167,7 @@ kubectl apply -f ${ABSOLUTE_URL}/code-examples/istio-authorization-policies/inte
```
:::tip
-Client intents are the cornerstone of [intent-based access control (IBAC)](/intent-based-access-control).
+Client intents are the cornerstone of [intent-based access control (IBAC)](/overview/intent-based-access-control).
::: 2. You should quickly see in the **[other-client]** terminal that it times out when calling the server,
as expected since it didn't declare its intents:
@@ -250,7 +222,7 @@ It's now clear what happened:
Otterize did its job of both protecting the server _and_ allowing intended access.
:::
-## What did we accomplish?
+### What did we accomplish?
- Controlling access through Istio authorization policies no longer means touching authorization policies at all.
@@ -284,7 +256,7 @@ all the appropriate configuration was managed automatically behind the scenes.
Try to create an intents file yourself for **client-other**, and apply it to allow this other client to call the server.
:::
-## What's next
+### What's next
- Get started with the [Otterize network mapper for Istio](/quickstart/visualization/k8s-istio-watcher) to help you bootstrap intents files with HTTP resources
for use in [intent-based access control (IBAC)](/intent-based-access-control).
diff --git a/docs/quickstart/visualization/k8s-istio-watcher.mdx b/docs/features/istio/tutorials/k8s-istio-watcher.mdx
similarity index 68%
rename from docs/quickstart/visualization/k8s-istio-watcher.mdx
rename to docs/features/istio/tutorials/k8s-istio-watcher.mdx
index 93f87c243..85679169d 100644
--- a/docs/quickstart/visualization/k8s-istio-watcher.mdx
+++ b/docs/features/istio/tutorials/k8s-istio-watcher.mdx
@@ -18,67 +18,18 @@ In this tutorial, we will:
## Prerequisites
-
-Prepare a Kubernetes cluster
-
-Before you start, you'll need a Kubernetes cluster. Having a cluster with a [CNI](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) that supports [NetworkPolicies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) isn't required for this tutorial, but is recommended so that your cluster works with other tutorials.
-
-{@include: ../../_common/cluster-setup.md}
-
-
+Already have Otterize deployed with Istio configured on your cluster? Skip to the [tutorial](#tutorial).
-You can now install Otterize in your cluster, and optionally connect to Otterize Cloud. Connecting to Cloud lets you:
+### 1. Deploy Otterize
+To deploy Otterize, head over to [Otterize Cloud](https://app.otterize.com) and associate a Kubernetes cluster on the [Clusters page](https://app.otterize.com/clusters), and follow the instructions. If you already have a Kubernetes cluster connected, skip this step.
-1. See what's happening visually in your browser, through the "access graph";
-So either forego browser visualization and:
-
-
-Install the Otterize network mapper in your cluster with the Istio watcher component enabled, and without Otterize Cloud
-
-{@include: ../../_common/install-otterize-istio-watcher.md}
-
-
-
-Or choose to include browser visualization and:
-
-
-Install Otterize in your cluster, with Otterize Cloud
-
-#### Create an Otterize Cloud account
-
-{@include: ../../_common/create-account.md}
-
-#### Install Otterize OSS, connected to Otterize Cloud
-
-{@include: ../../_common/install-otterize-from-cloud-with-enforcement-with-istiowatcher.md}
-
-
-
-Finally, you'll need to install the Otterize CLI (if you haven't already) to interact with the network mapper:
-
-
-Install the Otterize CLI
-
-{@include: ../../_common/install-otterize-cli.md}
-
-
-
-## Install and configure Istio
+### 2. Install and configure Istio
Install Istio in the cluster via Helm
-{@include: ../../_common/install-istio.md}
-
-
-
-Create a namespace for our demo application and label it for Istio injection
-
-```shell
-kubectl create namespace otterize-tutorial-istio-mapping
-kubectl label namespace otterize-tutorial-istio-mapping istio-injection=enabled
-```
+{@include: ../../../_common/install-istio.md}
@@ -91,7 +42,7 @@ kubectl apply -f ${ABSOLUTE_URL}/code-examples/network-mapper/istio-telemetry-en
```
```yaml
-{@include: ../../../static/code-examples/network-mapper/istio-telemetry-enablement.yaml}
+{@include: ../../../../static/code-examples/network-mapper/istio-telemetry-enablement.yaml}
```
@@ -103,16 +54,20 @@ e.g. in the access graph, but we can also use that information to automatically
enforce them with Istio authorization policies.
:::
-## Deploy demo to simulate traffic
+## Tutorial
+
+### Deploy demo to simulate traffic
-Let's add services and traffic to the cluster and see how the network mapper builds the map.
+Let's create a namespace with Istio enabled, add services and traffic to the cluster, and see how the network mapper builds the map.
Deploy the following simple example — `client`, `client2` and `nginx`, communicating over HTTP:
```shell
+kubectl create namespace otterize-tutorial-istio-mapping
+kubectl label namespace otterize-tutorial-istio-mapping istio-injection=enabled
kubectl apply -n otterize-tutorial-istio-mapping -f ${ABSOLUTE_URL}/code-examples/network-mapper/istio.yaml
```
-## Map the cluster
+### Map the cluster
The Istio watcher component of the network mapper starts querying Envoy sidecars for HTTP connections and builds an
in-memory network map as soon as it's installed. The Otterize CLI allows you to interact with the network mapper to
@@ -122,7 +77,7 @@ For a complete list of the CLI capabilities read the [CLI command reference](/re
### Extract and see the network map
-{@include: ../../getting-started/_show_mapped_istio_traffic_cli.mdx}
+{@include: ../../../getting-started/_show_mapped_istio_traffic_cli.mdx}
### Show the access graph in Otterize Cloud
@@ -144,7 +99,7 @@ The access graph reveals several types of information and insights, such as:
3. Filtering the map to include recently-seen traffic, since some date in the past. That way you can eliminate calls that are no longer relevant, without having to reset the network mapper and start building a new map.
4. Showing more specifics about access, if the intents operator is also connected: understand which services are protected or would be protected, and which client calls are being blocked or would be blocked. We'll see more of that in the Istio AuthorizationPolicy tutorial.
-## What's next
+### What's next
The network mapper is a great way to bootstrap IBAC. It generates client intents files that reflect
the current topology of your services; those can then be used by each client team to grant them easy
@@ -157,7 +112,7 @@ Where to go next?
- If you haven't already, see the [automate network policies tutorial](/quickstart/access-control/k8s-network-policies).
- Or go to the next tutorial to [automate secure access for Kafka](/quickstart/access-control/k8s-kafka-mtls).
-### Teardown
+## Teardown
To remove Istio and the deployed examples run:
diff --git a/docs/features/kafka/_category_.json b/docs/features/kafka/_category_.json
new file mode 100644
index 000000000..f48b40857
--- /dev/null
+++ b/docs/features/kafka/_category_.json
@@ -0,0 +1,9 @@
+{
+ "label": "Kafka",
+ "position": 3,
+ "collapsed": true,
+ "customProps": {
+ "image": "/img/icons/kafka-no-word-mark.svg",
+ "cloud-only": "true"
+ }
+}
diff --git a/docs/features/kafka/index.mdx b/docs/features/kafka/index.mdx
new file mode 100644
index 000000000..c3178535d
--- /dev/null
+++ b/docs/features/kafka/index.mdx
@@ -0,0 +1,129 @@
+---
+sidebar_position: 1
+title: Kafka | Overview
+hide_table_of_contents: true
+hide_title: true
+---
+
+import DocsLinkCard from "@site/src/components/LinkCard";
+
+export const kafka_tutorials = [
+ {
+ title: 'Kafka topic-level access mapping',
+ description: 'View Kafka network connections',
+ url: '/features/kafka/tutorials/k8s-kafka-mapping'
+ },
+ {
+ title: 'Kafka access automation using Otterize Cloud mTLS',
+ description: 'Manage access to Kafka topics with Otterize Cloud mTLS',
+ url: '/features/kafka/tutorials/k8s-kafka-mtls'
+ },
+ {
+ title: 'Kafka access automation using cert-manager mTLS',
+ description: 'Manage access to Kafka topics with a cert-manager',
+ url: '/features/kafka/tutorials/k8s-kafka-mtls-cert-manager'
+ }
+];
+
+# Kafka
+
+Otterize can build a map of your cluster, and enforce access between services using Kafka ACLs. Otterize is also able to map access to the Kafka topic and operation level, by reading Kafka access logs.
+
+### Tutorials
+
+To learn how to use the Intents Operator and Credentials Operator to enforce access with Kafka ACLs, or how to map your cluster to the Kafka topic and operation level, try one of these quickstart tutorials.
+
+
+
+### How does Otterize work with Kafka?
+
+1. First, the cluster must have [Otterize installed](/overview/installation).
+2. Configure the Otterize Intents Operator to manage a Kafka broker by declaring a `KafkaServerConfig`. See the example yaml below.
+
+```yaml
+apiVersion: k8s.otterize.com/v1alpha3
+kind: KafkaServerConfig
+metadata:
+ name: kafkaserverconfig
+ namespace: kafka
+spec:
+ service:
+ name: kafka #name of the Kafka service broker
+ addr: kafka.kafka:9092
+```
+
+Or, as a ready-to-paste command:
+```bash
+kubectl apply -f ${ABSOLUTE_URL}/code-examples/kafka-mtls/kafkaserverconfig.yaml
+```
+
+The Kafka broker must be configured with a superuser for the Intents Operator to be able to set ACLs, with the ACL authorizer enabled, and to use mTLS, which the Intents Operator and other clients will use to authenticate.
+Here's an example configuration based on the `values.yaml` of the Bitnami Kafka Helm chart. To see a working example, check out the tutorials for Kafka.
+
+```yaml
+superUsers: User:CN=intents-operator.otterize-system
+allowEveryoneIfNoAclFound: true
+podAnnotations:
+ credentials-operator.otterize.com/cert-type: jks
+ credentials-operator.otterize.com/tls-secret-name: kafka-tls-secret
+ credentials-operator.otterize.com/dns-names: "kafka-0.kafka-headless.kafka.svc.cluster.local,kafka.kafka.svc.cluster.local"
+# Authenticate clients using mTLS
+auth:
+ clientProtocol: mtls
+ interBrokerProtocol: mtls
+ tls:
+ type: jks
+ existingSecrets:
+ - kafka-tls-secret
+ password: password
+ jksTruststore: truststore.jks
+ jksKeystoreSAN: keystore.jks
+authorizerClassName: kafka.security.authorizer.AclAuthorizer
+```
+
+3. To acquire TLS credentials for another pod, specify a Pod annotation with the required TLS secret name.
+```yaml
+spec:
+ template:
+ metadata:
+ annotations:
+ # 1. Generate credentials as a secret called "client-credentials-secret":
+ credentials-operator.otterize.com/tls-secret-name: client-credentials-secret
+ ...
+ spec:
+ volumes:
+ # 2. Create a volume containing this secret:
+ - name: otterize-credentials
+ secret:
+ secretName: client-credentials-secret
+ ...
+ containers:
+ - name: client
+ ...
+ volumeMounts:
+ # 3. Mount volume into container
+ - name: otterize-credentials
+ mountPath: /var/otterize/credentials
+ readOnly: true
+```
+
+4. Once the Kafka clients are deployed with the mTLS credentials, they should be able to access topics as we have enabled `allowEveryoneIfNoAclFound`. To begin enforcing, declare ClientIntents, which will cause the Intents Operator to configure ACLs allowing this access.
+
+```yaml
+apiVersion: k8s.otterize.com/v1alpha3
+kind: ClientIntents
+metadata:
+ name: client
+ namespace: otterize-tutorial-kafka-mtls
+spec:
+ service:
+ name: client
+ calls:
+ - name: kafka.kafka
+ type: kafka
+ kafkaTopics:
+ - name: mytopic
+ operations: [ produce,describe,consume ]
+ - name: transactions
+ operations: [ produce,describe,consume ]
+```
diff --git a/docs/features/kafka/reference.mdx b/docs/features/kafka/reference.mdx
new file mode 100644
index 000000000..a2f704193
--- /dev/null
+++ b/docs/features/kafka/reference.mdx
@@ -0,0 +1,60 @@
+---
+sidebar_position: 3
+title: Reference
+---
+
+### KafkaServerConfig example (YAML)
+
+```yaml
+apiVersion: k8s.otterize.com/v1alpha3
+kind: KafkaServerConfig
+metadata:
+ name: kafkaserverconfig
+ namespace: kafka
+spec:
+ service:
+ # name of the Kafka service broker
+ name: kafka
+ addr: kafka.kafka:9092
+```
+
+### ClientIntents example (YAML)
+
+```yaml
+apiVersion: k8s.otterize.com/v1alpha3
+kind: ClientIntents
+metadata:
+ name: client
+ namespace: otterize-tutorial-kafka-mtls
+spec:
+ service:
+ # The service requiring access to a topic
+ name: client
+ calls:
+ # name of the Kafka service broker
+ - name: kafka.kafka
+ type: kafka
+ kafkaTopics:
+ # Topic name
+ - name: mytopic
+ # ACL Operations including alter, delete, all, etc
+ operations: [ produce,describe,consume ]
+ # Multiple topics can be added
+ - name: transactions
+ operations: [ produce,describe,consume ]
+```
+
+### Helm Chart options
+
+| Key | Description | Default |
+|---------------------------------|-------------------------------------------------------------|--------------------------------|
+| `kafkawatcher.enable` | Enable Kafka watcher deployment (beta). | `false` |
+| `kafkawatcher.image.repository` | Kafka watcher image repository. | `otterize` |
+| `kafkawatcher.image.image` | Kafka watcher image. | `network-mapper-kafka-watcher` |
+| `kafkawatcher.image.tag` | Kafka watcher image tag. | `latest` |
+| `kafkawatcher.pullPolicy` | Kafka watcher pull policy. | `(none)` |
+| `kafkawatcher.pullSecrets` | Kafka watcher pull secrets. | `(none)` |
+| `kafkawatcher.resources` | Resources override. | `(none)` |
+| `kafkawatcher.kafkaServers` | Kafka servers to watch, specified as `pod.namespace` items. | `(none)` |
+
+View the [Helm chart reference](/reference/configuration/otterize-chart) for all other options
\ No newline at end of file
diff --git a/docs/features/kafka/tutorials/_category_.json b/docs/features/kafka/tutorials/_category_.json
new file mode 100644
index 000000000..bdfe77bf2
--- /dev/null
+++ b/docs/features/kafka/tutorials/_category_.json
@@ -0,0 +1,5 @@
+{
+ "label": "Tutorials",
+ "position": 2,
+ "collapsed": false
+}
diff --git a/docs/quickstart/visualization/k8s-kafka-mapping.mdx b/docs/features/kafka/tutorials/k8s-kafka-mapping.mdx
similarity index 70%
rename from docs/quickstart/visualization/k8s-kafka-mapping.mdx
rename to docs/features/kafka/tutorials/k8s-kafka-mapping.mdx
index c4bdc8a9d..66721cf2e 100644
--- a/docs/quickstart/visualization/k8s-kafka-mapping.mdx
+++ b/docs/features/kafka/tutorials/k8s-kafka-mapping.mdx
@@ -19,58 +19,12 @@ We will **not** be doing any access control in this demo, just purely mapping cl
## Prerequisites
-
-Prepare a Kubernetes cluster
-
-Before you start, you'll need a Kubernetes cluster. Having a cluster with a [CNI](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) that supports [NetworkPolicies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) isn't required for this tutorial, but is recommended so that your cluster works with other tutorials.
-
-{@include: ../../_common/cluster-setup.md}
-
-
-
-You can now install Otterize in your cluster, and optionally connect to Otterize Cloud. Connecting to Cloud lets you
-see what's happening visually in your browser, through the "access graph".
-
-So either forego browser visualization and:
-
-
-Install Otterize in your cluster with the Kafka watcher component enabled, without Otterize Cloud
-
-```
-helm repo add otterize https://helm.otterize.com
-helm repo update
-helm install otterize otterize/network-mapper -n otterize-system --create-namespace \
---set kafkawatcher.enable=true \
---set kafkawatcher.kafkaServers={"kafka-0.kafka"}
-```
-
-
+Already have Otterize & a Kafka broker deployed on your cluster? Skip to the [tutorial](#tutorial).
-Or choose to include browser visualization and:
+### 1. Deploy Otterize
+To deploy Otterize, head over to [Otterize Cloud](https://app.otterize.com) and associate a Kubernetes cluster on the [Clusters page](https://app.otterize.com/clusters), and follow the instructions. If you already have a Kubernetes cluster connected, skip this step.
-
-Install Otterize in your cluster, with Otterize Cloud
-
-#### Create an Otterize Cloud account
-
-{@include: ../../_common/create-account.md}
-
-#### Install Otterize OSS, connected to Otterize Cloud
-
-{@include: ../../_common/install-otterize-from-cloud-with-shadow-mode-and-kafka-watcher.md}
-
-
-
-Finally, you'll need to install the Otterize CLI (if you haven't already) to interact with the network mapper:
-
-
-Install the Otterize CLI
-
-{@include: ../../_common/install-otterize-cli.md}
-
-
-
-## Install Kafka
+### 2. Install Kafka
We will deploy a Kafka broker using Bitnami's [Helm chart](https://github.com/bitnami/charts/tree/master/bitnami/kafka).
In the chart we will configure Kafka to:
@@ -82,7 +36,7 @@ In the chart we will configure Kafka to:
Expand to see the Helm values.yaml used with the Bitnami chart
```yaml
-{@include: ../../../static/code-examples/kafka-mapping/helm/values.yaml}
+{@include: ../../../../static/code-examples/kafka-mapping/helm/values.yaml}
```
@@ -96,7 +50,9 @@ helm install --create-namespace -n kafka \
-f ${ABSOLUTE_URL}/code-examples/kafka-mapping/helm/values.yaml kafka bitnami/kafka --version 21.4.4
```
-## Deploy demo to simulate traffic
+## Tutorial
+
+### Deploy demo to simulate traffic
Let's add a few services that will access our Kafka server, and see how the network mapper builds the access map:
@@ -142,7 +98,7 @@ Only the arrows between the clients and the Kafka are green, because we've selec
Clicking on a specific arrow between a client and the broker reveals which topic and operations are being accessed.
-## What did we accomplish?
+### What did we accomplish?
Enabling the Kafka watcher component of the network mapper shows which clients connect to running Kafka servers, the topics they access, and the operations they undertake on those topics.
@@ -152,7 +108,7 @@ You can consume this information in various ways:
- [Via the CLI](/reference/cli): from the network mapper directly or the cloud.
- [Via the API](https://app.otterize.com/api/rest/v1beta).
-## What's next
+### What's next
- Try our [secure access for Kafka](/quickstart/access-control/k8s-kafka-mtls) tutorial
diff --git a/docs/quickstart/access-control/k8s-kafka-mtls-cert-manager.mdx b/docs/features/kafka/tutorials/k8s-kafka-mtls-cert-manager.mdx
similarity index 83%
rename from docs/quickstart/access-control/k8s-kafka-mtls-cert-manager.mdx
rename to docs/features/kafka/tutorials/k8s-kafka-mtls-cert-manager.mdx
index 4b8813a89..cccb495ac 100644
--- a/docs/quickstart/access-control/k8s-kafka-mtls-cert-manager.mdx
+++ b/docs/features/kafka/tutorials/k8s-kafka-mtls-cert-manager.mdx
@@ -22,20 +22,9 @@ In this tutorial, we will:
## Prerequisites
-### Prepare a Kubernetes cluster
+### 1. Install cert-manager and configure a CA issuer
-
-Expand for cluster setup instructions
-
-Before you start, you'll need a Kubernetes cluster.
-
-{@include: ../../_common/cluster-setup.md}
-
-
-
-### Install cert-manager and configure a CA issuer
-
-#### Install cert-manager
+##### Install cert-manager
Use the following command or [follow cert-manager's installation guide for different setups](https://cert-manager.io/docs/installation/):
@@ -43,14 +32,14 @@ Use the following command or [follow cert-manager's installation guide for diffe
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.0/cert-manager.yaml
```
-#### Set up a CA (Certificate Authority) `ClusterIssuer`
+##### Set up a CA (Certificate Authority) `ClusterIssuer`
:::caution
This tutorial uses the built-in and easy-to-setup `CA` Issuer type so that the tutorial is easy to run, but you should not use this issuer as-is in production. Instead, consider using one of the other issuers, such as a Venafi or Vault issuer. [Read more about CA issuers in the cert-manager documentation](https://cert-manager.io/docs/configuration/ca/).
:::
```yaml
-{@include: ../../../static/code-examples/kafka-mtls-cert-manager/clusterissuer.yaml}
+{@include: ../../../../static/code-examples/kafka-mtls-cert-manager/clusterissuer.yaml}
```
Deploy this `ClusterIssuer`:
@@ -61,50 +50,26 @@ kubectl apply -f ${ABSOLUTE_URL}/code-examples/kafka-mtls-cert-manager/clusteris
You may have to wait for `cert-manager` to start successfully before you are able to deploy the `ClusterIssuer`.
-### Install Otterize
-
-You can now install Otterize in your cluster, and connect to Otterize Cloud. Connecting to Cloud lets you:
-
-1. See what's happening visually in your browser, through the "access graph";
-2. Generate certificates using the Otterize Cloud hosted service. If you prefer to generate certificates in-cluster, you can [follow the tutorial for cert-manager](/quickstart/access-control/k8s-kafka-mtls-cert-manager).
-
-
-
-
-
-{@include: ../../_common/install-otterize-from-cloud-with-shadow-mode-and-kafka-watcher-and-cert-manager.md}
-
-#### Configure the access graph in Otterize Cloud to only show Kafka authorization status
+### 2. Install Otterize
-You want to make sure that under **Istio Policies** _Use in access graph_ is turned off and that under **Network Policies** _Use in access graph_ is also turned off.
+Already have Otterize & a Kafka broker deployed on your cluster? Skip to the [tutorial](#tutorial).
-Keep _Use in access graph_ **on** under **Kafka ACLs** so that the access graph only shows the authorization status for Kafka ACLs.
+#### Deploy Otterize
+To deploy Otterize, head over to [Otterize Cloud](https://app.otterize.com) and associate a Kubernetes cluster on the [Clusters page](https://app.otterize.com/clusters), and follow the instructions. If you already have a Kubernetes cluster connected, skip this step.
-![Make sure access graph is configured correctly](../../../static/img/quick-tutorials/k8s-kafka-mtls-cert-mgr//cloud-settings.png)
-
-
-
-
-To install without connecting to Otterize Cloud and no access graph, run the following command:
-
-```shell
-helm repo add otterize https://helm.otterize.com
-helm repo update
-
-helm upgrade --install otterize otterize/otterize-kubernetes -n otterize-system --create-namespace \
---set intentsOperator.operator.mode=defaultShadow \
---set global.deployment.credentialsOperator=true \
---set global.certificateProvider=cert-manager \
---set credentialsOperator.certManager.issuerName=ca-issuer \
+##### Note:
+* Under mTLS and Kafka support choose **cert-manager**.
+* Copy the Helm command and add the following flags:
+```bash
--set intentsOperator.operator.enableNetworkPolicyCreation=false \
--set networkMapper.kafkawatcher.enable=true \
--set networkMapper.kafkawatcher.kafkaServers={"kafka-0.kafka"}
```
-
-
+Note that enforcement is disabled; we will enable it later. The configuration tab should look like this:
+
-## Install Kafka
+### 3. Install Kafka
We will deploy a Kafka broker using Bitnami's [Helm chart](https://github.com/bitnami/charts/tree/master/bitnami/kafka).
In the chart we will configure Kafka to:
@@ -119,7 +84,7 @@ In the chart we will configure Kafka to:
Expand to see the Helm values.yaml used with the Bitnami chart
```yaml
-{@include: ../../../static/code-examples/kafka-mtls-cert-manager/helm/values.yaml}
+{@include: ../../../../static/code-examples/kafka-mtls-cert-manager/helm/values.yaml}
```
@@ -135,7 +100,7 @@ helm install --create-namespace -n kafka \
You can watch for all pods to be `Ready` using `kubectl get pods -n kafka -w`.
-## Configure Otterize to manage Kafka access
+### 4. Configure Otterize to manage Kafka access
In our simple example, we'll call the Kafka broker service simply "kafka".
Let's tell Otterize how to connect to the Kafka broker by applying an Otterize `KafkaServerConfig`, naming it `kafka`. The name will be the name we later use to declare `ClientIntents`.
@@ -145,10 +110,12 @@ kubectl apply -f ${ABSOLUTE_URL}/code-examples/kafka-mtls-cert-manager/kafkaserv
```
```yaml
-{@include: ../../../static/code-examples/kafka-mtls-cert-manager/kafkaserverconfig.yaml}
+{@include: ../../../../static/code-examples/kafka-mtls-cert-manager/kafkaserverconfig.yaml}
```
-## Deploy clients
+## Tutorial
+
+### Deploy clients
Our simple example consists of two client pods:
@@ -203,7 +170,7 @@ spec:
```yaml
-{@include: ../../../static/code-examples/kafka-mtls-cert-manager/client-deployment.yaml}
+{@include: ../../../../static/code-examples/kafka-mtls-cert-manager/client-deployment.yaml}
```
@@ -211,7 +178,7 @@ spec:
```yaml
-{@include: ../../../static/code-examples/kafka-mtls-cert-manager/client-other-deployment.yaml}
+{@include: ../../../../static/code-examples/kafka-mtls-cert-manager/client-other-deployment.yaml}
```
@@ -337,7 +304,7 @@ You can now browse to your account at [https://app.otterize.com](https://app.ott
The access graph shows, through its green and orange lines linking the services, that no clients are currently blocked because we haven't enabled any sort of enforcement yet. The orange lines indicate that, since we have not declared any intents for these clients, they _would_ be blocked if we were to turn enforcement on.
-## Apply intents
+### Apply intents
:::tip
@@ -348,7 +315,7 @@ You can click on the services or the lines connecting them to see which ClientIn
1. The client declares its intent to call the `kafka.kafka` server with this `intents.yaml` file:
```yaml
-{@include: ../../../static/code-examples/kafka-mtls-cert-manager/client-intents.yaml}
+{@include: ../../../../static/code-examples/kafka-mtls-cert-manager/client-intents.yaml}
```
We can apply intents for the `client` by applying the `client-intents.yaml` file:
@@ -361,11 +328,11 @@ If you go back to your access graph, you'll now see that the `client` has a soli
If you click on that solid line, you will see that the declared intents match the discovered intents, so access is assured.
-![client intents applied](../../../static/img/quick-tutorials/k8s-kafka-mtls-cert-mgr/client-intents.png)
+![client intents applied](/img/quick-tutorials/k8s-kafka-mtls-cert-mgr/client-intents.png)
2. At this point, since the Kafka server is not actually protected, the `client-other` can still access the topics. The line is orange, indicating that it has no declared intents.
-![Declared Intent](../../../static/img/quick-tutorials/k8s-kafka-mtls-cert-mgr/declared-intent.png)
+![Declared Intent](/img/quick-tutorials/k8s-kafka-mtls-cert-mgr/declared-intent.png)
We can see what happened:
@@ -373,7 +340,7 @@ We can see what happened:
2. Calls from **[client-other]** are not declared (orange line).
3. Looking at the Kafka service, we can see that **[client]** has specific access configured (via Kafka ACLs) to perform `all` operations on the `mytopic` topic.
-## Turn on protection
+### Turn on protection
At this point, we haven't actually protected our Kafka broker. From everything we've done so far, we can see, however, that if we were to turn on protection, the `client-other` would lose access to the broker.
@@ -382,7 +349,7 @@ Let's see that in action. Our clients that have not declared intents will be blo
We need to turn protection on in for this Kafka broker by declaring it as a protected service:
```yaml
-{@include: ../../../static/code-examples/kafka-mtls-cert-manager/protectedservice.yaml}
+{@include: ../../../../static/code-examples/kafka-mtls-cert-manager/protectedservice.yaml}
```
Apply this `ProtectedService` resource:
@@ -406,9 +373,9 @@ error="kafka server: The client is not authorized to access this topic
And if you look back at your access graph, you'll see that the Kafka broker is now protected, and that the `client-other` and `client-authenticated` are blocked.
-![Clients blocked](../../../static/img/quick-tutorials/k8s-kafka-mtls-cert-mgr//clients-blocked.png)
+![Clients blocked](/img/quick-tutorials/k8s-kafka-mtls-cert-mgr/clients-blocked.png)
-## What did we accomplish?
+### What did we accomplish?
- Controlling Kafka access no longer means touching ACLs, issuing and managing and distributing certs, establishing trust,
etc.
@@ -454,7 +421,7 @@ This was achieved by using the built-in Kafka ACL mechanism, which the intents o
-## What's next
+### What's next
- [Learn more about credentials-operator works with cert-manager](/reference/configuration/credentials-operator#cert-manager).
- [Enable the credentials-operator `CertificateRequest` auto-approver](/reference/configuration/credentials-operator/helm-chart#cert-manager-parameters) for production deployments of cert-manager where the default auto-approver is disabled.
diff --git a/docs/quickstart/access-control/k8s-kafka-mtls.mdx b/docs/features/kafka/tutorials/k8s-kafka-mtls.mdx
similarity index 86%
rename from docs/quickstart/access-control/k8s-kafka-mtls.mdx
rename to docs/features/kafka/tutorials/k8s-kafka-mtls.mdx
index 75c58068c..e1fe429a0 100644
--- a/docs/quickstart/access-control/k8s-kafka-mtls.mdx
+++ b/docs/features/kafka/tutorials/k8s-kafka-mtls.mdx
@@ -21,36 +21,23 @@ In this tutorial, we will:
## Prerequisites
-### Prepare a Kubernetes cluster
+Already have Otterize & a Kafka broker deployed on your cluster? Skip to the [tutorial](#tutorial).
-
-Expand for cluster setup instructions
-
-Before you start, you'll need a Kubernetes cluster.
-
-{@include: ../../_common/cluster-setup.md}
-
-
-
-### Install Otterize
-
-You can now install Otterize in your cluster, and connect to Otterize Cloud. Connecting to Cloud lets you:
+### 1. Deploy Otterize
+To deploy Otterize, head over to [Otterize Cloud](https://app.otterize.com) and associate a Kubernetes cluster on the [Clusters page](https://app.otterize.com/clusters), and follow the instructions. If you already have a Kubernetes cluster connected, skip this step.
-1. See what's happening visually in your browser, through the "access graph".
-2. Generate certificates using the Otterize Cloud hosted service. If you prefer to generate certificates in-cluster, you can [follow the tutorial for cert-manager](/quickstart/access-control/k8s-kafka-mtls-cert-manager).
-
-#### Install Otterize OSS, connected to Otterize Cloud
-{@include: ../../_common/install-otterize-from-cloud-with-shadow-mode-and-kafka-watcher.md}
-
-#### Configure the access graph in Otterize Cloud to only show Kafka authorization status
-
-You want to make sure that under **Istio Policies** _Use in access graph_ is turned off and that under **Network Policies** _Use in access graph_ is also turned off.
-
-Keep _Use in access graph_ **on** under **Kafka ACLs** so that the access graph only shows the authorization status for Kafka ACLs.
+##### Note:
+* Under mTLS and Kafka support choose **Otterize Cloud**.
+* Copy the Helm command and add the following flags:
+```bash
+--set intentsOperator.operator.enableNetworkPolicyCreation=false \
+--set networkMapper.kafkawatcher.enable=true \
+--set networkMapper.kafkawatcher.kafkaServers={"kafka-0.kafka"}
+```
-![Make sure access graph is configured correctly](../../../static/img/quick-tutorials/k8s-kafka-mtls//cloud-settings.png)
+Note that enforcement is disabled; we will enable it later. The configuration tab should look like this:
-## Install Kafka
+### 2. Install Kafka
We will deploy a Kafka broker using Bitnami's [Helm chart](https://github.com/bitnami/charts/tree/master/bitnami/kafka).
In the chart we will configure Kafka to:
@@ -65,7 +52,7 @@ In the chart we will configure Kafka to:
Expand to see the Helm values.yaml used with the Bitnami chart
```yaml
-{@include: ../../../static/code-examples/kafka-mtls/helm/values.yaml}
+{@include: ../../../../static/code-examples/kafka-mtls/helm/values.yaml}
```
@@ -81,7 +68,7 @@ helm install --create-namespace -n kafka \
You can watch for all pods to be `Ready` using `kubectl get pods -n kafka -w`.
-## Configure Otterize to manage Kafka access
+### 3. Configure Otterize to manage Kafka access
In our simple example, we'll call the Kafka broker service simply "kafka".
Let's tell Otterize how to connect to the Kafka broker by applying an Otterize `KafkaServerConfig`, naming it `kafka`. The name will be the name we later use to declare `ClientIntents`.
@@ -91,10 +78,11 @@ kubectl apply -f ${ABSOLUTE_URL}/code-examples/kafka-mtls/kafkaserverconfig.yaml
```
```yaml
-{@include: ../../../static/code-examples/kafka-mtls/kafkaserverconfig.yaml}
+{@include: ../../../../static/code-examples/kafka-mtls/kafkaserverconfig.yaml}
```
+## Tutorial
-## Deploy clients
+### Deploy clients
Our simple example consists of two client pods:
@@ -149,7 +137,7 @@ spec:
```yaml
-{@include: ../../../static/code-examples/kafka-mtls/client-deployment.yaml}
+{@include: ../../../../static/code-examples/kafka-mtls/client-deployment.yaml}
```
@@ -157,7 +145,7 @@ spec:
```yaml
-{@include: ../../../static/code-examples/kafka-mtls/client-other-deployment.yaml}
+{@include: ../../../../static/code-examples/kafka-mtls/client-other-deployment.yaml}
```
@@ -248,7 +236,7 @@ You can now browse to your account at [https://app.otterize.com](https://app.ott
The access graph shows, through its green and orange lines linking the services, that no clients are currently blocked because we haven't enabled any sort of enforcement yet. The orange lines indicate that, since we have not declared any intents for these clients, they _would_ be blocked if we were to turn enforcement on.
-## Apply intents
+### Apply intents
:::tip
@@ -259,7 +247,7 @@ You can click on the services or the lines connecting them to see which ClientIn
1. The client declares its intent to call the `kafka.kafka` server with this `intents.yaml` file:
```yaml
-{@include: ../../../static/code-examples/kafka-mtls/client-intents.yaml}
+{@include: ../../../../static/code-examples/kafka-mtls/client-intents.yaml}
```
We can apply intents for the `client` by applying the `client-intents.yaml` file:
@@ -272,11 +260,11 @@ If you go back to your access graph, you'll now see that the `client` has a soli
If you click on that solid line, you will see that the declared intents match the discovered intents, so access is assured.
-![client intents applied](../../../static/img/quick-tutorials/k8s-kafka-mtls/client-intents.png)
+![client intents applied](/img/quick-tutorials/k8s-kafka-mtls/client-intents.png)
2. At this point, since the Kafka server is not actually protected, the `client-other` can still access the topics. The line is orange, indicating that it has no declared intents.
-![Declared Intent](../../../static/img/quick-tutorials/k8s-kafka-mtls/declared-intent.png)
+![Declared Intent](/img/quick-tutorials/k8s-kafka-mtls/declared-intent.png)
We can see what happened:
@@ -286,7 +274,7 @@ We can see what happened:
Also, the access graph shows information about the mTLS certificates (credentials) distributed to the various services, as long as [Cloud-managed credentials](/security#cryptographic-credentials) are being used.
-## Turn on protection
+### Turn on protection
At this point, we haven't actually protected our Kafka broker. From everything we've done so far, we can see, however, that if we were to turn on protection, the `client-other` would lose access to the broker.
@@ -295,7 +283,7 @@ Let's see that in action. Our clients that have not declared intents will be blo
We need to turn protection on in for this Kafka broker by declaring it as a protected service:
```yaml
-{@include: ../../../static/code-examples/kafka-mtls/protectedservice.yaml}
+{@include: ../../../../static/code-examples/kafka-mtls/protectedservice.yaml}
```
Apply this `ProtectedService` resource:
@@ -319,9 +307,9 @@ error="kafka server: The client is not authorized to access this topic
And if you look back at your access graph, you'll see that the Kafka broker is now protected, and that the `client-other` and `client-authenticated` are blocked.
-![Clients blocked](../../../static/img/quick-tutorials/k8s-kafka-mtls/clients-blocked.png)
+![Clients blocked](/img/quick-tutorials/k8s-kafka-mtls/clients-blocked.png)
-## What did we accomplish?
+### What did we accomplish?
- Controlling Kafka access no longer means touching ACLs, issuing and managing and distributing certs, establishing trust,
etc.
diff --git a/docs/features/network-mapping-network-policies/_category_.json b/docs/features/network-mapping-network-policies/_category_.json
new file mode 100644
index 000000000..c0f24a86c
--- /dev/null
+++ b/docs/features/network-mapping-network-policies/_category_.json
@@ -0,0 +1,8 @@
+{
+ "label": "Network mapping & network policies",
+ "position": 1,
+ "collapsed": true,
+ "customProps": {
+ "image": "/img/icons/networking.png"
+ }
+}
diff --git a/docs/features/network-mapping-network-policies/index.mdx b/docs/features/network-mapping-network-policies/index.mdx
new file mode 100644
index 000000000..bdfc1beae
--- /dev/null
+++ b/docs/features/network-mapping-network-policies/index.mdx
@@ -0,0 +1,102 @@
+---
+sidebar_position: 1
+title: Network mapping & network policies
+---
+import DocsLinkCard from "@site/src/components/LinkCard";
+
+export const network_access_tutorials = [
+ {
+ title: 'Visualizing a Kubernetes Network',
+ description: 'Map network traffic in a cluster and view the connections',
+ url: '/features/network-mapping-network-policies/tutorials/k8s-network-mapper'
+ },
+{
+ title: 'Create and manage network policies',
+ description: 'Create Kubernetes network policies using IBAC',
+ url: '/features/network-mapping-network-policies/tutorials/k8s-network-policies'
+ },
+ {
+ title: 'Protecting a service with network policies',
+ description: 'An example on how to secure a single service',
+ url: '/features/network-mapping-network-policies/tutorials/protect-1-service-network-policies'
+ },
+ {
+ title: 'AWS EKS network policies with the VPC CNI',
+ description: 'Leverage AWS VPC CNI to apply network policies in EKS',
+ url: '/features/network-mapping-network-policies/tutorials/aws-eks-cni-mini'
+ }
+];
+
+Otterize's open-source [Network Mapper](/reference/configuration/network-mapper) and [Intents Operator](/reference/configuration/intents-operator) can map your cluster, with zero configuration, low privileges and low resource usage, and automate the management of network policies.
+
+### Tutorials
+
+View the tutorials below to learn more about how to get started:
+
+
+
+
+### Mapping & visualizing
+
+Otterize's [Network Mapper](/reference/configuration/network-mapper) is a zero-config, open-source and non-invasive tool to map your cluster. Deploy it on your cluster to get a graphical, textual or JSON representation of your cluster, and optionally use it to generate ClientIntents, which are declarations of the access each service in your cluster requires.
+
+By connecting your cluster to Otterize Cloud, you'll immediately be presented with an interactive, historic and filterable map of your cluster.
+You can access the same information in different formats by using the open-source Otterize CLI, with the commands `otterize mapper export`, `otterize mapper list` and `otterize mapper visualize`.
+
+**visualize example:**
+![visualize example](/img/examples/example-visualize.png)
+
+**list example:**
+```
+client in namespace otterize-tutorial-npol calls:
+ - server in namespace otterize-tutorial-npol
+client-other in namespace otterize-tutorial-npol calls:
+ - server in namespace otterize-tutorial-npol
+```
+**export example:**
+```yaml
+apiVersion: k8s.otterize.com/v1alpha3
+kind: ClientIntents
+metadata:
+ name: client
+ namespace: otterize-tutorial-npol
+spec:
+ service:
+ name: client
+ calls:
+ - name: server
+---
+apiVersion: k8s.otterize.com/v1alpha3
+kind: ClientIntents
+metadata:
+ name: client-other
+ namespace: otterize-tutorial-npol
+spec:
+ service:
+ name: client-other
+ calls:
+ - name: server
+```
+
+
+### Access control
+
+By default, Kubernetes pods permit all outgoing and incoming traffic, posing potential security risks.
+
+Kubernetes [NetworkPolicies](/reference/terminology#network-policies) can be employed to limit either egress or ingress traffic, thereby enhancing security and compliance.
+Having deployed Otterize, you can then apply the [ClientIntents](/reference/IBAC-Overview) generated by the network mapper, or declared by you, to your cluster. The [Intents Operator](/reference/configuration/intents-operator) calculates which [Network Policies](/reference/terminology#network-policies) are required to allow the traffic declared by the ClientIntents, enforcing access on your cluster so that only intentional access is allowed.
+
+Read more in the [Network Policies Deep Dive](/features/network-mapping-network-policies/reference/Network-Policies-Deep-Dive).
+
+```yaml
+apiVersion: k8s.otterize.com/v1alpha3
+kind: ClientIntents
+metadata:
+ name: client
+ namespace: otterize-tutorial-istio
+spec:
+ service:
+ name: client
+ calls:
+ - name: nginx
+```
\ No newline at end of file
diff --git a/docs/reference/access-controls/network-policies/README.mdx b/docs/features/network-mapping-network-policies/reference/Network-Policies-Deep-Dive.mdx
similarity index 99%
rename from docs/reference/access-controls/network-policies/README.mdx
rename to docs/features/network-mapping-network-policies/reference/Network-Policies-Deep-Dive.mdx
index fb5e5ad53..10517004d 100644
--- a/docs/reference/access-controls/network-policies/README.mdx
+++ b/docs/features/network-mapping-network-policies/reference/Network-Policies-Deep-Dive.mdx
@@ -1,6 +1,6 @@
---
sidebar_position: 1
-title: Network policies deep dive
+title: Network Policies Deep Dive
---
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
diff --git a/docs/features/network-mapping-network-policies/reference/README.mdx b/docs/features/network-mapping-network-policies/reference/README.mdx
new file mode 100644
index 000000000..b81897009
--- /dev/null
+++ b/docs/features/network-mapping-network-policies/reference/README.mdx
@@ -0,0 +1,167 @@
+---
+sidebar_position: 3
+title: Reference
+hide_table_of_contents: true
+---
+
+### ClientIntents example (YAML)
+
+```yaml
+apiVersion: k8s.otterize.com/v1alpha3
+kind: ClientIntents
+metadata:
+ name: client-server-access
+ namespace: otterize-example
+spec:
+ service:
+ # The name of the service initiating the connection
+ name: client
+ calls:
+ # The name of the service receiving the connection. Multiple names can be provided
+ - name: server
+ # multiple services can be added
+ - name: orderservice
+ # Optional granular rules can be added like the HTTPResources options below.
+ type: http
+ HTTPResources:
+ - path: /orders/*
+ methods: [ get, post ]
+```
+
+
+### Helm Chart options
+
+| Key | Description | Default |
+|-----------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------|---------|
+| `global.deployment.networkMapper` | Whether or not to deploy network-mapper. | `true` |
+| `operator.autoCreateNetworkPoliciesForExternalTraffic` | (deprecated, use `allowExternalTraffic` instead) Automatically allow external traffic, if a new ClientIntents resource would result in blocking external (internet) traffic and there is an Ingress/Service resource indicating external traffic is expected. | `true` |
+| `operator.autoCreateNetworkPoliciesForExternalTrafficDisableIntentsRequirement` | (deprecated, use `allowExternalTraffic` instead) **experimental** - If `autoCreateNetworkPoliciesForExternalTraffic` is enabled, do not require ClientIntents resources - simply create network policies based off of the existence of an Ingress/Service resource. | `false` |
+
+View the [Helm chart reference](/reference/configuration/otterize-chart) for all other options
+
+### Network mapper parameters
+All configurable parameters of the network mapper can be configured under the alias `networkMapper`.
+Further information about network mapper parameters can be found [in the network mapper's chart](https://github.com/otterize/helm-charts/tree/main/network-mapper).
+
+
+### CLI: Network mapper commands
+
+All `otterize network-mapper` commands share a set of optional flags which will not be repeated in the documentation
+for each command.
+
+`otterize network-mapper reset`
+
+Resets the network mapper by deleting all map information built up so far in memory.
+
+---
+
+`otterize network-mapper list [-n <namespace1>,<namespace2>,...]`
+
+Return the network map built by the network mapper since it started, or since it was reset,
+as a list of clients and the servers they call.
+
+#### Options
+
+| Name | Default | Description |
+|------------------------|---------|-------------------------------------------------------------|
+| `-n` or `--namespaces` | | Include only clients in these namespaces (comma-separated). |
+
+#### Returns
+
+Here's a partial output from `otterize network-mapper list -n otterize-ecom-demo`:
+
+```shell
+cartservice in namespace otterize-ecom-demo calls:
+ - redis-cart
+checkoutservice in namespace otterize-ecom-demo calls:
+ - cartservice
+ - currencyservice
+ - emailservice
+ - paymentservice
+ - productcatalogservice
+ - shippingservice
+frontend in namespace otterize-ecom-demo calls:
+ - adservice
+ - cartservice
+ - checkoutservice
+ - currencyservice
+ - productcatalogservice
+ - recommendationservice
+ - shippingservice
+loadgenerator in namespace otterize-ecom-demo calls:
+ - frontend
+recommendationservice in namespace otterize-ecom-demo calls:
+ - productcatalogservice
+```
+---
+`otterize network-mapper visualize [--format=png | --format=jpg] [-n <namespace1>,<namespace2>,...] -o <output-path>`
+Return the network map built by the network mapper since it started, or since it was reset,
+as an image.
+
+Uses GraphViz (specifically go-graphviz) to generate the image.
+
+#### Options
+
+| Name | Default | Description |
+|-------------------------|---------|-----------------------------------------------------------------------------------------|
+| `--format` | `png` | Image output format: "png" or "jpg". |
+| `-n` or `--namespaces` | | Include only clients in these namespaces (comma-separated). |
+| `-o` or `--output-path` | | Filename for the image. |
+| `--exclude-labels`      |         | A list of labels that would exclude services from list/export. Example: "include=false"  |
+| `--exclude-services`    |         | A list of services to exclude from list/export. Example: "service1,service2"             |
+
+#### Returns
+
+Here's the image generated by running `otterize network-mapper visualize -n otterize-ecom-demo -o otterize-ecom-demo.png`:
+![graph](https://user-images.githubusercontent.com/29180932/221423644-df8fbba2-dca1-4c56-baeb-f0d0afc55eb1.png)
+
+---
+
+`otterize network-mapper export [--format <format>] [-n <namespace1>,<namespace2>,...] [-o <output-path>] [--output-type <output-type>]`
+
+Return the network map built by the network mapper since it started, or since it was reset,
+as YAML [client intents file(s)](/reference/intents-and-intents-files/#intents-file-formats) or as JSON file(s).
+
+#### Options
+
+| Name | Default | Description |
+|------------------------|---------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `--format` | `yaml` | Specifies the format for the export: either `yaml` or `json`. |
+| `-n` or `--namespaces` | | Export only clients in these namespaces (comma-separated). |
+| `-o` or `--output` | `STDOUT` | Filename or directory for redirecting the output. |
+| `--output-type` | `single-file` | Whether the output should be written as a single file (`single-file`) or as multiple files in a directory (`dir`). Requires the `-o` or `--output` to point to a directory. |
+| `--server`             |               | Export only intents for clients that call this server. The server name must be specified with both service name and namespace, in the format `<service-name>.<namespace>`. Example: `cartservice.otterize-ecom-demo`. |
+| `--exclude-labels` | | A list of labels that would exclude services from list/export. Example: `include=false` would exclude any service labeled with `include=false` from being included in list/export. |
+| `--exclude-services` | | A list of services to exclude from list/export. Example: `service1,service2`. |
+
+#### Returns
+
+Here's a partial output from `otterize network-mapper export -n otterize-ecom-demo`:
+
+```shell
+apiVersion: k8s.otterize.com/v1alpha3
+kind: ClientIntents
+metadata:
+ name: cartservice
+ namespace: otterize-ecom-demo
+spec:
+ service:
+ name: cartservice
+ calls:
+ - name: redis-cart
+ type: http
+---
+apiVersion: k8s.otterize.com/v1alpha3
+kind: ClientIntents
+metadata:
+ name: checkoutservice
+ namespace: otterize-ecom-demo
+spec:
+ service:
+ name: checkoutservice
+ calls:
+ - name: cartservice
+ type: http
+ - name: currencyservice
+ type: http
+```
\ No newline at end of file
diff --git a/docs/features/network-mapping-network-policies/tutorials/_category_.json b/docs/features/network-mapping-network-policies/tutorials/_category_.json
new file mode 100644
index 000000000..bdfe77bf2
--- /dev/null
+++ b/docs/features/network-mapping-network-policies/tutorials/_category_.json
@@ -0,0 +1,5 @@
+{
+ "label": "Tutorials",
+ "position": 2,
+ "collapsed": false
+}
diff --git a/docs/quickstart/access-control/aws-eks-cni-mini.mdx b/docs/features/network-mapping-network-policies/tutorials/aws-eks-cni-mini.mdx
similarity index 79%
rename from docs/quickstart/access-control/aws-eks-cni-mini.mdx
rename to docs/features/network-mapping-network-policies/tutorials/aws-eks-cni-mini.mdx
index fbf8809dc..56177fe59 100644
--- a/docs/quickstart/access-control/aws-eks-cni-mini.mdx
+++ b/docs/features/network-mapping-network-policies/tutorials/aws-eks-cni-mini.mdx
@@ -1,6 +1,6 @@
---
-sidebar_position: 2
-title: Network policies on AWS EKS with the VPC CNI
+sidebar_position: 4
+title: AWS EKS network policies with the VPC CNI
image: /img/quick-tutorials/aws-eks-mini/social.png
---
@@ -13,11 +13,13 @@ This tutorial will walk you through deploying an AWS EKS cluster with the AWS VP
## Prerequisites
- An EKS cluster with the AWS VPC CNI add-on installed and with the new built-in network policy support enabled. See [Installing the AWS VPC CNI add-on](https://docs.aws.amazon.com/eks/latest/userguide/pod-networking.html) for more information, or follow the instructions below.
-- The [Otterize CLI](/installation#install-the-otterize-cli).
+- The [Otterize CLI](/overview/installation#install-the-otterize-cli).
- The [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html).
- The [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) command-line tool.
-## Step one: Create an AWS EKS cluster with the AWS VPC CNI plugin
+## Tutorial
+
+### Step one: Create an AWS EKS cluster with the AWS VPC CNI plugin
Before you start, you'll need an AWS Kubernetes cluster. Having a cluster with a [CNI](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) that supports [NetworkPolicies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) is required for this tutorial.
@@ -75,44 +77,11 @@ eksctl create cluster -f cluster-config.yaml
Once your AWS EKS has finished deploying the control pane and node group, the next step is deploying Otterize as well as a couple of clients and a server to see how they are affected by network policies.
-## Step two: Install the Otterize agents
+### Step two: Install the Otterize agents
### Install Otterize on your cluster
-You can now install Otterize in your cluster, and optionally connect to Otterize Cloud. Connecting to Cloud lets you see what's happening visually in your browser, through the "access graph".
-
-So either forego browser visualization and:
-
-
-Install Otterize in your cluster, without Otterize Cloud
-
-{@include: ../../_common/install-otterize.md}
-
-
-
-Or choose to include browser visualization and:
-
-
-Install Otterize in your cluster, with Otterize Cloud
-
-#### Create an Otterize Cloud account
-
-{@include: ../../_common/create-account.md}
-
-#### Install Otterize OSS, connected to Otterize Cloud
-
-{@include: ../../_common/install-otterize-from-cloud.md}
-
-
-
-Finally, you'll need to install the Otterize CLI (if you haven't already) to interact with the network mapper:
-
-
-Install the Otterize CLI
-
-{@include: ../../_common/install-otterize-cli.md}
-
-
+To deploy Otterize, head over to [Otterize Cloud](https://app.otterize.com) and create a Kubernetes cluster on the [Clusters page](https://app.otterize.com/clusters), and follow the instructions.
### Deploy a server and two clients
@@ -126,7 +95,7 @@ Once you have that installed and running your Otterize access graph should look
![Access Graph](/img/quick-tutorials/aws-eks-mini/access-graph.png)
-## Step three: Create an intent
+### Step three: Create an intent
Now that you have Otterize installed, the next step is to create an intent which will enable access to the server from the client. If you enable protection on the server without declaring an intent, the client will be blocked.
@@ -173,7 +142,7 @@ At which point you should see that the `server` service is ready to be protected
And you can then protect the `server` service by applying the following `yaml` file:
```yaml
-{@include: ../../../static/code-examples/aws-eks-mini/protect-server.yaml}
+{@include: ../../../../static/code-examples/aws-eks-mini/protect-server.yaml}
```
Protect the server by applying the resource:
@@ -185,7 +154,7 @@ And you should see your access graph showing the service as protected:
![Protected Service](/img/quick-tutorials/aws-eks-mini/protected.png)
-## What's next
+### What's next
Have a look at the [guide on how to deploy protection to a larger, more complex application one step at a time](/guides/protect-1-service-network-policies).
diff --git a/docs/quickstart/visualization/k8s-network-mapper.mdx b/docs/features/network-mapping-network-policies/tutorials/k8s-network-mapper.mdx
similarity index 51%
rename from docs/quickstart/visualization/k8s-network-mapper.mdx
rename to docs/features/network-mapping-network-policies/tutorials/k8s-network-mapper.mdx
index b703b3cf7..6a9ef08c2 100644
--- a/docs/quickstart/visualization/k8s-network-mapper.mdx
+++ b/docs/features/network-mapping-network-policies/tutorials/k8s-network-mapper.mdx
@@ -1,6 +1,6 @@
---
sidebar_position: 1
-title: Network mapping a Kubernetes cluster
+title: Mapping a Kubernetes network
image: /img/visualization/k8s-network-mapper/social.png
---
@@ -8,7 +8,7 @@ import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
import styles from "/src/css/styles.module.css";
-The network mapper allows you to map network traffic for your K8s cluster. Once mapped you can export it as an image, json, list, or view it within Otterize Cloud.
+The network mapper allows you to map pod-to-pod traffic within your K8s cluster.
In this tutorial, we will:
@@ -17,54 +17,14 @@ In this tutorial, we will:
## Prerequisites
-
-Prepare a Kubernetes cluster
+### Install Otterize on your cluster
+To deploy Otterize, head over to [Otterize Cloud](https://app.otterize.com) and create a Kubernetes cluster on the [Clusters page](https://app.otterize.com/clusters), and follow the instructions.
-Before you start, you'll need a Kubernetes cluster. Having a cluster with a [CNI](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) that supports [NetworkPolicies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) isn't required for this tutorial, but is recommended so that your cluster works with other tutorials.
+We will also need the [Otterize CLI](/overview/installation#install-the-otterize-cli).
-{@include: ../../_common/cluster-setup.md}
+## Tutorial
-
-
-You can now install Otterize in your cluster (if it's not already installed), and optionally connect to Otterize Cloud. Connecting to Cloud lets you:
-
-1. See what's happening visually in your browser, through the "access graph";
-2. View pod public internet egress traffic.
-
-So either forego browser visualization and:
-
-
-Install Otterize in your cluster, without Otterize Cloud
-
-{@include: ../../_common/install-otterize.md}
-
-
-
-Or choose to include browser visualization and:
-
-
-Install Otterize in your cluster, with Otterize Cloud
-
-#### Create an Otterize Cloud account
-
-{@include: ../../_common/create-account.md}
-
-#### Install Otterize OSS, connected to Otterize Cloud
-
-{@include: ../../_common/install-otterize-from-cloud-with-enforcement.md}
-
-
-
-Finally, you'll need to install the Otterize CLI (if you haven't already) to interact with the network mapper:
-
-
-Install the Otterize CLI
-
-{@include: ../../_common/install-otterize-cli.md}
-
-
-
-## Deploy demo to simulate traffic
+### Deploy demo to simulate traffic
Let's add services and traffic to the cluster and see how the network mapper builds the map.
Deploy the following simple example — `client`, `client2` and `server`, communicating over HTTP:
@@ -73,16 +33,7 @@ Deploy the following simple example — `client`, `client2` and `server`, co
kubectl apply -n otterize-tutorial-mapper -f ${ABSOLUTE_URL}/code-examples/network-mapper/all.yaml
```
-
-Expand to see the deployment YAML
-
-```yaml
-{@include: ../../../static/code-examples/network-mapper/all.yaml}
-```
-
-
-
-## Map the cluster
+### Map the cluster
The network mapper starts to sniff traffic and build an in-memory network map as soon as it's installed.
The Otterize CLI allows you to interact with the network mapper to grab a snapshot of current mapped traffic,
@@ -92,7 +43,7 @@ For a complete list of the CLI capabilities read the [CLI command reference](/re
### Extract and see the network map
-{@include: ../../getting-started/_show_mapped_traffic_cli.mdx}
+{@include: ../../../getting-started/_show_mapped_traffic_cli.mdx}
### Show the access graph in Otterize Cloud
@@ -103,17 +54,17 @@ If you've attached Otterize OSS to Otterize Cloud, you can now also see the [acc
The access graph reveals several types of information and insights, such as:
1. Seeing the network map for different clusters, seeing the subset of the map for a given namespace, or even — according to how you've mapped namespaces to environments — seeing the subset of the map for a specific environment.
-2. Viewing the public internet egress traffic for each pod, including the DNS name and the IPs associated with each outbound request.
-3. Filtering the map to include recently-seen traffic, since some date in the past. That way you can eliminate calls that are no longer relevant, without having to reset the network mapper and start building a new map.
-4. If the intents operator is also connected, the access graph now reveals more specifics about access: understand which services are protected or would be protected, and which client calls are being blocked or would be blocked. We'll see more of that in the next couple of tutorials.
+2. Filtering the map to include recently-seen traffic, since some date in the past. That way you can eliminate calls that are no longer relevant, without having to reset the network mapper and start building a new map.
+3. If the intents operator is also connected, the access graph now reveals more specifics about access: understand which services are protected or would be protected, and which client calls are being blocked or would be blocked. We'll see more of that in the next couple of tutorials.
+
+Note, for example, that the `client` → `server` arrow is yellow. Clicking on it shows:
-Note, for example, that the `client` → `server` arrow is yellow. Clicking on it shows the automatically generated intents for both the client pod to the server pod and the egress of the client to the public internet. If we take a closer look, the ClientIntent YAML specifies that the `client` can call the `server` on the internal network, and it can reach the IP Address `142.250.189.174`. We can see from the comment that this IP belongs to google.com.
-## What's next
+### What's next
The network mapper is a great way to bootstrap IBAC. It generates client intents files that reflect
the current topology of your services; those can then be used by each client team to grant them easy
@@ -125,7 +76,7 @@ Where to go next?
- If you haven't already, see the [automate network policies tutorial](/quickstart/access-control/k8s-network-policies).
- Or go to the next tutorial to [automate secure access for Kafka](/quickstart/access-control/k8s-kafka-mtls).
-### Teardown
+## Teardown
To remove the deployed examples run:
diff --git a/docs/quickstart/access-control/k8s-network-policies.mdx b/docs/features/network-mapping-network-policies/tutorials/k8s-network-policies.mdx
similarity index 81%
rename from docs/quickstart/access-control/k8s-network-policies.mdx
rename to docs/features/network-mapping-network-policies/tutorials/k8s-network-policies.mdx
index 2117d5210..f76bbe470 100644
--- a/docs/quickstart/access-control/k8s-network-policies.mdx
+++ b/docs/features/network-mapping-network-policies/tutorials/k8s-network-policies.mdx
@@ -1,6 +1,6 @@
---
sidebar_position: 1
-title: NetworkPolicy automation
+title: NetworkPolicy Automation
image: /img/quick-tutorials/network-policies/social.png
---
@@ -23,45 +23,14 @@ In this tutorial, we will:
## Prerequisites
-
-Prepare a Kubernetes cluster that supports network policies
-
-Before you start, you'll need a Kubernetes cluster with a [CNI](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) that supports [NetworkPolicies](https://kubernetes.io/docs/concepts/services-networking/network-policies/).
-
-{@include: ../../_common/cluster-setup.md}
-
-
-
-You can now install (or reinstall) Otterize in your cluster, and optionally connect to Otterize Cloud. Connecting to Cloud lets you:
-
-1. See what's happening visually in your browser, through the "access graph";
-2. Avoid using SPIRE (which can be installed with Otterize) for issuing certificates, as Otterize Cloud provides a certificate service.
-
-So either forego browser visualization and:
-
-
-Install Otterize in your cluster, without Otterize Cloud
+### Install Otterize on your cluster
+To deploy Otterize, head over to [Otterize Cloud](https://app.otterize.com) and associate a Kubernetes cluster on the [Clusters page](https://app.otterize.com/clusters), and follow the instructions.
-{@include: ../../_common/install-otterize.md}
+We will also need the [Otterize CLI](/overview/installation#install-the-otterize-cli).
-
-
-Or choose to include browser visualization and:
-
-
-Install Otterize in your cluster, with Otterize Cloud
-
-#### Create an Otterize Cloud account
-
-{@include: ../../_common/create-account.md}
-
-#### Install Otterize OSS, connected to Otterize Cloud
-
-{@include: ../../_common/install-otterize-from-cloud-with-enforcement.md}
-
-
+## Tutorial
-## Deploy the server and the two clients
+### Deploy the server and the two clients
Our simple example consists of three pods: an HTTP server and two clients that call it.
@@ -72,7 +41,7 @@ Our simple example consists of three pods: an HTTP server and two clients that c
```yaml
-{@include: ../../../static/code-examples/automate-network-policies/namespace.yaml}
+{@include: ../../../../static/code-examples/automate-network-policies/namespace.yaml}
```
@@ -80,23 +49,23 @@ Our simple example consists of three pods: an HTTP server and two clients that c
```yaml
-{@include: ../../../static/code-examples/automate-network-policies/server-deployment.yaml}
+{@include: ../../../../static/code-examples/automate-network-policies/server-deployment.yaml}
---
-{@include: ../../../static/code-examples/automate-network-policies/server-service.yaml}
+{@include: ../../../../static/code-examples/automate-network-policies/server-service.yaml}
```
```yaml
-{@include: ../../../static/code-examples/automate-network-policies/client-deployment.yaml}
+{@include: ../../../../static/code-examples/automate-network-policies/client-deployment.yaml}
```
```yaml
-{@include: ../../../static/code-examples/automate-network-policies/client-other-deployment.yaml}
+{@include: ../../../../static/code-examples/automate-network-policies/client-other-deployment.yaml}
```
@@ -176,7 +145,7 @@ If you've attached Otterize OSS to Otterize Cloud, you can now browse to your ac
![Access graph](/img/quick-tutorials/network-policies/base.png)
-## Apply intents
+### Apply intents
We will now declare that the **client** intends to call the **server**.
@@ -193,7 +162,7 @@ You can click on the services or the lines connecting them to see which ClientIn
1. Here is the `intents.yaml` declaration of the client, which we will apply below:
```yaml
-{@include: ../../../static/code-examples/automate-network-policies/intents.yaml}
+{@include: ../../../../static/code-examples/automate-network-policies/intents.yaml}
```
### See it in action
@@ -269,7 +238,7 @@ It's now clear what happened:
Otterize did its job of both protecting the server _and_ allowing intended access.
:::
-## What did we accomplish?
+### What did we accomplish?
- Controlling access through network policies no longer means touching network policies at all.
@@ -304,7 +273,7 @@ Further information about network policies and Otterize can be found
Try to create an intents file yourself for **client-other**, and apply it to allow this other client to call the server.
:::
-## What's next
+### What's next
- Get started with the [Otterize network mapper](/quickstart/visualization/k8s-network-mapper) to help you bootstrap intents files
for use in [intent-based access control (IBAC)](/intent-based-access-control).
diff --git a/docs/guides/protect-1-service-network-policies.mdx b/docs/features/network-mapping-network-policies/tutorials/protect-1-service-network-policies.mdx
similarity index 79%
rename from docs/guides/protect-1-service-network-policies.mdx
rename to docs/features/network-mapping-network-policies/tutorials/protect-1-service-network-policies.mdx
index b0060cb4e..0587745be 100644
--- a/docs/guides/protect-1-service-network-policies.mdx
+++ b/docs/features/network-mapping-network-policies/tutorials/protect-1-service-network-policies.mdx
@@ -1,7 +1,6 @@
---
sidebar_position: 1
-title: "Protecting one service with network policies"
-sidebar_label: "Protecting one service with network policies"
+title: "Protecting a service with network policies"
---
import CodeBlock from "@theme/CodeBlock";
@@ -27,16 +26,15 @@ Note: all the capabilities of IBAC are within Otterize OSS, while the access gra
## Prerequisites
-
-Prepare a cluster
+### Install Otterize on your cluster
+If you do not have a cluster, you will need to prepare one with [network policy support](/overview/installation#create-a-cluster-with-support-for-network-policies).
-Before you start, you'll need a Kubernetes cluster.
+To deploy Otterize, head over to [Otterize Cloud](https://app.otterize.com) and create a Kubernetes cluster on the [Clusters page](https://app.otterize.com/clusters), and follow the instructions.
-{@include: ../_common/cluster-setup.md}
-
+We will also need the [Otterize CLI](/overview/installation#install-the-otterize-cli).
-
-Deploy the demo set of services
+## Tutorial
+### Deploy the demo set of services
To deploy these into your cluster:
```bash
@@ -44,88 +42,64 @@ kubectl create namespace otterize-ecom-demo
kubectl apply -n otterize-ecom-demo -f ${ABSOLUTE_URL}/code-examples/shadow-mode/ecom-demo.yaml
```
-Optional: check that the demo was deployed.
-
-To see all the pods in the demo:
-```bash
-kubectl get pods -n otterize-ecom-demo
-```
-The pods should all be ready and running:
-```bash
-NAME READY STATUS RESTARTS AGE
-adservice-65494cbb9d-5lrv6 1/1 Running 0 115s
-cartservice-6d84fc45bb-hdtwn 1/1 Running 0 115s
-checkoutservice-5599486df-dvj9n 1/1 Running 3 (79s ago) 115s
-currencyservice-6d64686d74-lxb7x 1/1 Running 0 115s
-emailservice-7c6cbfbbd7-xjxlt 1/1 Running 0 115s
-frontend-f9448d7d4-6dmnr 1/1 Running 0 115s
-kafka-0 1/1 Running 2 (83s ago) 115s
-loadgenerator-7f6987f59-bchgm 1/1 Running 0 114s
-orderservice-7ffdbf6df-wzzfd 1/1 Running 0 115s
-otterize-ecom-demo-zookeeper-0 1/1 Running 0 115s
-paymentservice-86855d78db-zjjfn 1/1 Running 0 115s
-productcatalogservice-5944c7f666-2rjc6 1/1 Running 0 115s
-recommendationservice-6c8d848498-zm2rm 1/1 Running 0 114s
-redis-cart-6b79c5b497-xpms2 1/1 Running 0 115s
-shippingservice-85694cb9bd-v54xp 1/1 Running 0 114s
-```
-
-You can now browse the web app of this demo, if you wish:
-
-
-
-
-To get the externally-accessible URL where your demo front end is available, run:
-```bash
-kubectl get service -n otterize-ecom-demo frontend-external | awk '{print $4}'
-```
-The result should be similar to (if running on AWS EKS):
-```
-a11843075fd254f8099a986467098647-1889474685.us-east-1.elb.amazonaws.com
-```
-Go ahead and browse to the URL above to "shop" and get a feel for the demo's behavior.
-(The URL might take some time to populate across DNS servers. Note that we are accessing an HTTP and not an HTTPS website.)
-
-
-
-To get the externally-accessible URL where your demo front end is available, run:
-```
-kubectl port-forward -n otterize-ecom-demo service/frontend-external 8080:80 &
-```
-The demo is now accessible at:
-```
-http://localhost:8080
-```
-Go ahead and browse to the URL above to "shop" and get a feel for the demo's behavior.
-
-
-
-
-
-
-
-Create an Otterize Cloud account
-
-{@include: ../_common/create-account.md}
-
+ Optional: check that the demo was deployed.
+
+ To see all the pods in the demo:
+ ```bash
+ kubectl get pods -n otterize-ecom-demo
+ ```
+ The pods should all be ready and running:
+ ```bash
+ NAME READY STATUS RESTARTS AGE
+ adservice-65494cbb9d-5lrv6 1/1 Running 0 115s
+ cartservice-6d84fc45bb-hdtwn 1/1 Running 0 115s
+ checkoutservice-5599486df-dvj9n 1/1 Running 3 (79s ago) 115s
+ currencyservice-6d64686d74-lxb7x 1/1 Running 0 115s
+ emailservice-7c6cbfbbd7-xjxlt 1/1 Running 0 115s
+ frontend-f9448d7d4-6dmnr 1/1 Running 0 115s
+ kafka-0 1/1 Running 2 (83s ago) 115s
+ loadgenerator-7f6987f59-bchgm 1/1 Running 0 114s
+ orderservice-7ffdbf6df-wzzfd 1/1 Running 0 115s
+ otterize-ecom-demo-zookeeper-0 1/1 Running 0 115s
+ paymentservice-86855d78db-zjjfn 1/1 Running 0 115s
+ productcatalogservice-5944c7f666-2rjc6 1/1 Running 0 115s
+ recommendationservice-6c8d848498-zm2rm 1/1 Running 0 114s
+ redis-cart-6b79c5b497-xpms2 1/1 Running 0 115s
+ shippingservice-85694cb9bd-v54xp 1/1 Running 0 114s
+ ```
+
+ You can now browse the web app of this demo, if you wish:
+
+
+
+
+ To get the externally-accessible URL where your demo front end is available, run:
+ ```bash
+ kubectl get service -n otterize-ecom-demo frontend-external | awk '{print $4}'
+ ```
+ The result should be similar to (if running on AWS EKS):
+ ```
+ a11843075fd254f8099a986467098647-1889474685.us-east-1.elb.amazonaws.com
+ ```
+ Go ahead and browse to the URL above to "shop" and get a feel for the demo's behavior.
+ (The URL might take some time to populate across DNS servers. Note that we are accessing an HTTP and not an HTTPS website.)
+
+
+
+ To get the externally-accessible URL where your demo front end is available, run:
+ ```
+ kubectl port-forward -n otterize-ecom-demo service/frontend-external 8080:80 &
+ ```
+ The demo is now accessible at:
+ ```
+ http://localhost:8080
+ ```
+ Go ahead and browse to the URL above to "shop" and get a feel for the demo's behavior.
+
+
-
-Install Otterize OSS
-
-{@include: ../_common/install-otterize-from-cloud.md}
-
-
-
-
-Install the Otterize CLI
-
-{@include: ../_common/install-otterize-cli.md}
-
-
-
-
-## Seeing the access graph
+### Seeing the access graph
In the Otterize Cloud UI, your [integration](https://app.otterize.com/integrations) should now show all 3 Otterize OSS operators — the network mapper, intents operator, and credentials operator — as connected, with a green status.
@@ -160,7 +134,7 @@ Otterize can configure several access control mechanisms, such as Istio authoriz
-## Choose one service to protect
+### Choose one service to protect
Now let's prepare to protect just one service, but remain in shadow mode: no actual network policies, yet. We'll verify no intended access would be blocked before turning on the network policy protection.
@@ -200,7 +174,7 @@ We see that:
-## Declare client intents
+### Declare client intents
The graph visually tells us we'll need to declare all 3 of those clients' intents:
1. `frontend` → `productcatalogservice`.
@@ -298,14 +272,14 @@ And you can verify the `productcatalogservice` would not block *any* of its disc
The server is still yellow because it's unprotected — let's fix that.
-## Protect the `productcatalogservice`
+### Protect the `productcatalogservice`
Now that we've verified no intended clients would be blocked, we can safely protect the server.
To do so, recall that we configured Otterize OSS to be in the `defaultShadow` mode: by default, it's in shadow mode for all services, not actually managing network policies for them. To protect a service is a simple matter of applying a `ProtectedService` YAML for it, overriding the default for it:
```yaml
-{@include: ../../static/code-examples/guides/protect-1-service-network-policies/protect-productcatalogservice.yaml}
+{@include: ../../../../static/code-examples/guides/protect-1-service-network-policies/protect-productcatalogservice.yaml}
```
Let's apply this file to our cluster:
@@ -326,22 +300,22 @@ Sure enough, the `productcatalogservice` is
-## Ready for production
+### Ready for production
-### Will load balancers, ingress, and other external traffic be affected?
+#### Will load balancers, ingress, and other external traffic be affected?
The intents operator automatically detects resources of kind `Service` (with type `LoadBalancer` or `NodePort`), or of kind `Ingress`, and creates network policies to allow external traffic to relevant pods.
You do not need to configure anything to get this to work. [Learn more here.](/reference/configuration/intents-operator#handling-external-traffic)
-### Will admission webhook controllers, e.g. policy validators like Kyverno, be affected?
+#### Will admission webhook controllers, e.g. policy validators like Kyverno, be affected?
Since you are not placing a global default-deny policy that would affect controllers in your cluster, only default-deny network policies on individual pods, Otterize will not affect calls to admission webhook controllers and they will continue functioning as before.
-### Working with Otterize in CI/CD
+#### Working with Otterize in CI/CD
We recommend placing the `ClientIntents` and `ProtectedService` resource YAMLs alongside the services that own them, in their respective Git repositories:
- The `ProtectedService` YAMLs alongside the servers they are protecting, e.g. in the Helm chart belonging to the server.
- `ClientIntents` YAMLs, whether they were generated from the network mapper or created and maintained by the client developer teams, alongside each client, e.g. in the Helm chart belonging to the client.
-## In summary
+### In summary
So what have we learned? You can gradually roll out IBAC and drive towards zero trust, service by service, in a safe, predictable, and quick way, by following 4 simple steps:
diff --git a/docs/features/postgresql/_category_.json b/docs/features/postgresql/_category_.json
new file mode 100644
index 000000000..94a4efd26
--- /dev/null
+++ b/docs/features/postgresql/_category_.json
@@ -0,0 +1,8 @@
+{
+ "label": "PostgreSQL",
+ "position": 5,
+ "collapsed": true,
+ "customProps": {
+ "image": "/img/icons/postgresql-no-word-mark.svg"
+ }
+}
diff --git a/docs/features/postgresql/index.mdx b/docs/features/postgresql/index.mdx
new file mode 100644
index 000000000..38d87a00c
--- /dev/null
+++ b/docs/features/postgresql/index.mdx
@@ -0,0 +1,107 @@
+---
+sidebar_position: 1
+title: PostgreSQL | Overview
+hide_title: true
+---
+
+import DocsLinkCard from "@site/src/components/LinkCard";
+
+export const postgres_tutorials = [
+ {
+ title: 'Just-in-time PostgreSQL Access',
+ description: 'Learn how to manage just-in-time users and SQL GRANTs',
+ url: 'postgresql/tutorials/postgres'
+ },
+ {
+ title: 'Map PostgreSQL access',
+ description: 'Learn how to use PostgreSQL audit logs to map access to your database',
+ url: 'postgresql/tutorials/postgres-mapping'
+ }
+];
+
+# PostgreSQL
+
+Otterize is able to create just-in-time username-and-password pairs for your service, providing them as a Kubernetes Secret that can be mounted to file or mapped to environment variables, as well as `GRANT`ing access to databases and tables, based on `ClientIntents` ([Intents-Based Access Control](/overview/intent-based-access-control)) declarations.
+In addition, Otterize can map the access to your PostgreSQL database, showing you which service is accessing which database and table, and which operations it is performing. This can be used to automatically generate the `ClientIntents` declarations.
+
+Unlike other access controls in Otterize, PostgreSQL support is exclusively available when using Otterize Cloud.
+
+### Tutorials
+
+To learn how to use the Intents Operator and Credentials Operator to enforce access using PostgreSQL GRANTs, or map access to your PostgreSQL database, try one of these quickstart tutorials.
+
+
+
+
+
+### How does Otterize work with PostgreSQL?
+
+Otterize Cloud will create a unique PostgreSQL username-password combination for each service's use, exposed via a Kubernetes Secret. The service will use these credentials to connect to the database. `ClientIntents` will define the access required by that service. As the intents are applied, Otterize Cloud will keep the database's list of users and GRANTs up to date so that the service is able to access it.
+
+1. To get started, your cluster must have Otterize installed and connected to Otterize Cloud.
+2. You’ll need to [integrate](https://app.otterize.com/integrations) your database by providing a connection URL and admin-level credentials to manage permissions in your database.
+3. Each service can request a username-password Secret to be created, by annotating the Pod with `credentials-operator.otterize.com/user-password-secret-name`. Below is an example of that annotation and passing the generated credentials into a container with environment variables.
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: server
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: server
+ template:
+ metadata:
+ annotations:
+ # highlight-next-line
+ credentials-operator.otterize.com/user-password-secret-name: server-creds
+ labels:
+ app: server
+ spec:
+ serviceAccountName: server
+ containers:
+ - name: server
+ imagePullPolicy: Always
+ image: 'supercool/my-example-container'
+ ports:
+ - containerPort: 80
+ env:
+ - name: DB_SERVER_USER
+ valueFrom:
+ secretKeyRef:
+ name: server-creds
+ key: username
+ - name: DB_SERVER_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: server-creds
+ key: password
+```
+
+
+4. Apply `ClientIntents` and the specified access will be `GRANT`ed to the service in the `ClientIntents`.
+
+
+```yaml
+apiVersion: k8s.otterize.com/v1alpha3
+kind: ClientIntents
+metadata:
+ name: client-intents-for-server
+ namespace: otterize-tutorial-postgres
+spec:
+ service:
+ name: server
+ calls:
+ - name: otterize-tutorial-postgres # Same name as our integration
+ type: database
+ databaseResources:
+ - databaseName: otterize-tutorial
+ table: public.example
+ operations:
+ - SELECT
+ - INSERT
+```
+
+5. Done!
\ No newline at end of file
diff --git a/docs/features/postgresql/reference.mdx b/docs/features/postgresql/reference.mdx
new file mode 100644
index 000000000..61f63b245
--- /dev/null
+++ b/docs/features/postgresql/reference.mdx
@@ -0,0 +1,32 @@
+---
+sidebar_position: 3
+title: Reference
+---
+
+### ClientIntents example (YAML)
+
+```yaml
+apiVersion: k8s.otterize.com/v1alpha3
+kind: ClientIntents
+metadata:
+ name: client-intents-for-server
+ namespace: otterize-tutorial-postgres
+spec:
+ service:
+ # Service requiring access to PostgreSQL
+ name: server
+ calls:
+ # This name will need to match the provided integration name
+ - name: otterize-tutorial-postgres
+ type: database
+ databaseResources:
+ - databaseName: otterize-tutorial
+ # Optional table name, if omitted all tables will be granted access
+ table: public.example
+ # Operations being granted, options include SELECT, INSERT, UPDATE, DELETE, ALL
+ operations:
+ - SELECT
+ - INSERT
+```
+
+
diff --git a/docs/features/postgresql/tutorials/_category_.json b/docs/features/postgresql/tutorials/_category_.json
new file mode 100644
index 000000000..bdfe77bf2
--- /dev/null
+++ b/docs/features/postgresql/tutorials/_category_.json
@@ -0,0 +1,5 @@
+{
+ "label": "Tutorials",
+ "position": 2,
+ "collapsed": false
+}
diff --git a/docs/quickstart/visualization/postgresql.mdx b/docs/features/postgresql/tutorials/postgres-mapping.mdx
similarity index 98%
rename from docs/quickstart/visualization/postgresql.mdx
rename to docs/features/postgresql/tutorials/postgres-mapping.mdx
index 5fc6dfa38..32202f76b 100644
--- a/docs/quickstart/visualization/postgresql.mdx
+++ b/docs/features/postgresql/tutorials/postgres-mapping.mdx
@@ -30,7 +30,7 @@ Already have Otterize deployed with the database integration configured on your
Prepare a Kubernetes cluster
- {@include: ../../_common/cluster-setup.md}
+ {@include: ../../../_common/cluster-setup.md}
@@ -38,11 +38,11 @@ Already have Otterize deployed with the database integration configured on your
#### Create an Otterize Cloud account
- {@include: ../../_common/create-account.md}
+ {@include: ../../../_common/create-account.md}
#### Install Otterize OSS, connected to Otterize Cloud
- {@include: ../../_common/install-otterize-from-cloud-with-enforcement.md}
+ {@include: ../../../_common/install-otterize-from-cloud-with-enforcement.md}
diff --git a/docs/quickstart/access-control/postgresql.mdx b/docs/features/postgresql/tutorials/postgres.mdx
similarity index 100%
rename from docs/quickstart/access-control/postgresql.mdx
rename to docs/features/postgresql/tutorials/postgres.mdx
diff --git a/docs/getting-started/README.mdx b/docs/getting-started/README.mdx
index 2da7691cd..2e6928edd 100644
--- a/docs/getting-started/README.mdx
+++ b/docs/getting-started/README.mdx
@@ -1,64 +1,127 @@
---
sidebar_position: 1
slug: /
-title: Getting started
+title: Getting Started
+hide_title: true
---
-export const LinkButton = (props) => (
-
-);
+export const introduction = [
+ {
+ title: 'What is Otterize',
+ description: 'Learn about how Otterize works and the function of the different Kubernetes operators',
+ url: `/overview`
+ },
+ {
+ title: 'Intent-Based Access Control',
+ description: 'Learn how to use IBAC, with YAML examples.',
+ url: '/overview/intent-based-access-control'
+ },
+];
-Otterize is a platform for implementing intent-based access control ([IBAC](/intent-based-access-control)) for workloads.
-The platform is composed of **Otterize OSS**, which is tailored for a single Kubernetes cluster, and **Otterize Cloud**, which adds visibility and operationalization across Kubernetes clusters and non-Kubernetes infrastructures.
+export const features = [
+ {
+ title: 'Network mapping & network policies',
+ icon: '/img/icons/networking.png',
+ url: '/features/network-mapping-network-policies/'
+ },
+ {
+ title: 'AWS IAM',
+ icon: '/img/icons/aws.png',
+ url: '/features/aws-iam/'
+ },
+ {
+ title: 'Kafka',
+ icon: '/img/icons/kafka-no-word-mark.svg',
+ url: '/features/kafka/'
+ },
+ {
+ title: 'PostgreSQL',
+ icon: '/img/icons/postgresql-no-word-mark.svg',
+ url: '/features/postgresql/'
+ },
+ {
+ title: 'Istio',
+ icon: '/img/icons/istio-no-word-mark.svg',
+ url: '/features/istio/'
+ },
+];
-Otterize enables platform engineers to easily implement, expand, and unify secured access for their Kubernetes workloads.
-## Let's go!
-Dive right in with simple demos to manage access control:
-* [Create and manage network policies](/quickstart/access-control/k8s-network-policies).
-* [Network policies on AWS EKS with the VPC CNI](/quickstart/access-control/aws-eks-cni-mini).
-* [Create and manage Istio authorization policies](/quickstart/access-control/k8s-istio-authorization-policies).
-* [Configure secure access for Kafka using Otterize Cloud mTLS](/quickstart/access-control/k8s-kafka-mtls), or [using cert-manager mTLS](/quickstart/access-control/k8s-kafka-mtls-cert-manager).
+export const tutorials_access = [
+ {
+ title: 'Create and manage network policies',
+ icon: '/img/icons/networking.png',
+ description: 'Create Kubernetes network policies using IBAC',
+ url: '/features/network-mapping-network-policies/tutorials/k8s-network-policies'
+ },
+ {
+ title: 'Network policies on AWS EKS',
+ icon: '/img/icons/networking.png',
+ description: 'Jump start network policies using AWS EKS and VPC CNI',
+ url: '/features/network-mapping-network-policies/tutorials/aws-eks-cni-mini'
+ },
+ {
+ title: 'Create and manage Istio authorization policies',
+ icon: '/img/icons/istio-no-word-mark.svg',
+ description: 'Using Istio and IBAC to secure your K8s cluster',
+ url: '/features/istio/tutorials/k8s-istio-authorization-policies'
+ },
+ {
+ title: 'Configure secure access for Kafka using Otterize Cloud mTLS',
+ icon: '/img/icons/kafka-no-word-mark.svg',
+ description: 'Declaring and applying intents to easily secure access to Kafka',
+ url: '/features/kafka/tutorials/k8s-kafka-mtls'
+ }
+];
-Or visualize communication in your cluster:
-* [Network mapping a Kubernetes cluster](/quickstart/visualization/k8s-network-mapper).
-* [Istio HTTP-level access mapping](/quickstart/visualization/k8s-istio-watcher).
-* [Kafka topic-level access mapping](/quickstart/visualization/k8s-network-mapper).
+export const tutorials_visualization = [
+ {
+ title: 'Network mapping a Kubernetes cluster',
+ icon: '/img/icons/networking.png',
+ description: 'Map pod-to-pod traffic within your K8s cluster',
+ url: '/features/network-mapping-network-policies/tutorials/k8s-network-mapper'
+ },
+ {
+ title: 'Istio HTTP-level access mapping',
+ icon: '/img/icons/istio-no-word-mark.svg',
+ description: 'The network mapper allows you to map pod-to-pod Istio traffic',
+ url: '/features/istio/tutorials/k8s-istio-watcher'
+ },
+ {
+ title: 'Kafka topic-level access mapping',
+ icon: '/img/icons/kafka-no-word-mark.svg',
+ description: 'View topic-level access to Kafka servers within your Kubernetes cluster',
+ url: '/features/kafka/tutorials/k8s-kafka-mapping'
+ }
+];
-## Components
+import DocsLinkCard from "@site/src/components/LinkCard";
-### Otterize OSS
-The Otterize OSS components are standalone open-source projects that implement intent-based access control (IBAC) for a single Kubernetes cluster. This same set of components is used to integrate with Otterize Cloud.
-- The [Otterize intents operator](/reference/configuration/intents-operator) translates ClientIntents resources to access controls: currently, network policies for pod-to-pod access, and ACLs for in-cluster Kafka client access. [See it in GitHub](https://github.com/otterize/intents-operator)
-- The [Otterize credentials operator](/reference/configuration/credentials-operator) integrates with SPIFFE/SPIRE to handle pod identities and manage certificates. [See it in GitHub](https://github.com/otterize/credentials-operator)
-- The [Otterize network mapper](/reference/configuration/network-mapper) sniffs pod-to-pod traffic and builds a network map, which is useful on its own and may also be exported as client intents files for bootstrapping IBAC. [See it in GitHub](https://github.com/otterize/network-mapper)
+
+Otterize is a platform for automating workload IAM (access control) for workloads on Kubernetes.
+Each workload declares what it needs in order to function, and a Kubernetes operator figures out the policies that need to be created. This concept is called [Intent-Based Access Control (IBAC)](/overview/intent-based-access-control).
-### Otterize CLI
+For each kind of supported IAM mechanism (such as Kubernetes Network Policies, AWS IAM policies), Otterize can automatically learn the required ClientIntents.
-The [Otterize CLI](/reference/cli) is used to control the network mapper or output its data, convert non-Kubernetes client
-intents files (if needed) to Kubernetes custom resource YAMLs, interface with the Otterize Cloud.
+Otterize is composed of [three open-source and standalone components (the intents operator, credentials operator and network mapper)](/overview) you deploy on your cluster, as part of a single Helm chart, which handle enforcement, and Otterize Cloud, which supplements them with additional features.
-## Open source and Cloud
+Learn about how Otterize works by reading below, or jump into one of the tutorials below to see how it works hands-on.
+### Introduction
-### Otterize OSS
+
-Otterize OSS is a standalone open-source implementation of intent-based access control (IBAC) for a single Kubernetes cluster. As well as being open source, Otterize OSS is completely free, licensed under the Apache 2.0 license and does not require Otterize Cloud.
+### Features & tutorials
-### Otterize Cloud
+Otterize makes it easy to automate and visualize workload IAM for your Kubernetes clusters across a variety of platforms. Explore each to learn how it works, and see its quickstart tutorials.
-Otterize Cloud adds unified visibility and operationalization,
-and spans multiple Kubernetes clusters as well as (coming soon) non-Kubernetes infrastructures.
+
+
+### Tutorials
+
+#### Automating workload IAM
+
+
+
+#### Visualizing network traffic and data access
+
-Read more in our [product page](https://otterize.com/product).
\ No newline at end of file
diff --git a/docs/getting-started/_category_.json b/docs/getting-started/_category_.json
index 38eabb7ef..012adff4f 100644
--- a/docs/getting-started/_category_.json
+++ b/docs/getting-started/_category_.json
@@ -1,3 +1,4 @@
{
- "collapsed": false
+ "collapsed": true,
+ "label": "Getting started"
}
\ No newline at end of file
diff --git a/docs/guides/_category_.json b/docs/guides/_category_.json
deleted file mode 100644
index 7cb5603b2..000000000
--- a/docs/guides/_category_.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "label": "Guides",
- "position": 4,
- "collapsed": false
-}
\ No newline at end of file
diff --git a/docs/otterize-cloud/object-model.mdx b/docs/otterize-cloud/object-model.mdx
deleted file mode 100644
index c002d5d31..000000000
--- a/docs/otterize-cloud/object-model.mdx
+++ /dev/null
@@ -1,40 +0,0 @@
----
-sidebar_position: 1
-title: Object model
----
-In Otterize Cloud, as across all of the Otterize product, the central object is the **intent**. Intent-based access control (IBAC) bases the authorization model of a server on the set of calls its clients declare they intend to make, granting them access to make declared calls, while blocking undeclared calls.
-
-## Intents
-
-An **intent** is a declaration that service A (the client service) intends to call service B (the server service), often with more granular information about the type of call (e.g. "HTTP" or "Kafka") and other details of the call (e.g. which HTTP resources and methods, or which Kafka topics and operations).
-
-The intent is expressed within a file, specifically a Kubernetes custom resource YAML declaration, that is then created in a specific Kubernetes cluster and namespace via `kubectl apply`. All the intents from a given client should appear in a single client intents YAML file, which then represents the overall intents of that service to be a client of other services.
-
-The declared intents applied in this way to a cluster are processed by the **Otterize intents operator** running in the cluster, which -- if configured to enforce intents -- manages Kubernetes network policies and Kafka ACLs according to those intents. When the intents operator is integrated with Otterize Cloud, it also reports those intents to the Cloud, to build an overall model of service-to-service access called the **access graph**.
-
-The declared intents form the basis of IBAC. But to controllably and confidently roll out IBAC to a working cluster, it's important to compare the declared access to the actual access (or attempted access) happening in the cluster. To that end, the **Otterize network mapper** detects attempted access automatically, and generates **discovered intents**: these reflect the intentions of services to call one another based on discovering the actual call attempts themselves. In other words, discovered intents form a network map of services actually calling each other, while declared intents reflect explicit declarations of services to call each other. When the network mapper is integrated with Otterize Cloud, the access graph in the Cloud will includes both discovered and declared intents, and will yield many more insights.
-
-Currently, Otterize only supports intents within a cluster (i.e. client and service are within the same cluster), not across clusters.
-
-## Services
-
-While declared and discovered intents are the edges of the access graph, **services** are its nodes. A service may be a client of other services, a server to other services, or both. (Note the difference between a "Kubernetes service" which is a specific construct used to make a pod callable by other pods, and an Otterize service that's the more general concept, in the sense of a microservice or a workload.)
-
-In Otterize Cloud, services are _inferred_ from the intents reported to the Cloud by the intents operator and the network mapper: whenever an intent is reported, it carries the information about the client and server of that intent. The intents operator adds more information when the service is identified as a Kafka service (the **Kafka server config**), and the credentials operator, when integrated, adds yet more information about any **certificates** issued to that service.
-
-A service name is unique within a namespace in a cluster, but not in general unique across the cluster or across clusters.
-
-{@include: _environments_and_namespaces.mdx}
-## Integrations
-
-Otterize Cloud currently supports two types of integrations: **Kubernetes integrations** and **generic integrations**. In the future, many other types of integrations will be added, allowing Otterize Cloud to work seamlessly with all your infrastructures and systems.
-
-A Kubernetes integration is used to connect a Kubernetes cluster with Otterize Cloud via any or all of the Otterize operators: the intents operator, the network mapper, and the credentials operator. It contains the credentials needed by the operators running in the Kubernetes cluster to communicate with the Cloud on behalf of that cluster, i.e., it ties together the physical Kubernetes cluster with its representation in Otterize Cloud. The integration also determines the environment to which namespaces in that clusters will be associated by default. The names of Kubernetes-type integrations must be unique within an organization.
-
-A generic integration is used to connect generically an external system to Otterize Cloud. It provides that system credentials to access the Otterize API/CLI, in a way that doesn't involve any specific Otterize user. That makes it ideal for building automations on top of the Otterize API. For example, new clusters provisioned for the development team could be automatically connected to Otterize Cloud, or a CI/CD system could automatically look in the access graph for services that would be blocked or intents that were not declared and applied and fail the build. The name of the integration should reflect the way it will be used. The names of generic-type integrations must be unique within an organization.
-
-When a Kubernetes cluster is connected to Otterize Cloud, it is represented in the Cloud by an **Kubernetes integration** object. You'll name it when you add a Kubernetes integration in the UI or through the API/CLI.
-
-The Otterize operators -- intents operator, network mapper, and/or credentials operator -- running in your cluster will inform the Cloud about the intents, services, and credentials within this cluster, and will also convey their configuration (e.g. shadow or enforcement mode) within this cluster.
-
-Note that, while a cluster and its namespaces and services could be in a single environment, and an environment could contain multiple clusters, many other combinations are possible. For example, a cluster could contain namespaces in multiple environments. Or, environments may contain some namespaces in one cluster and other namespaces in another cluster. Use whatever mappings make sense for your situation.
\ No newline at end of file
diff --git a/docs/overview/_category_.json b/docs/overview/_category_.json
new file mode 100644
index 000000000..ef3474b29
--- /dev/null
+++ b/docs/overview/_category_.json
@@ -0,0 +1,4 @@
+{
+ "collapsed": true,
+ "label": "Overview"
+}
\ No newline at end of file
diff --git a/docs/overview/index.mdx b/docs/overview/index.mdx
new file mode 100644
index 000000000..918680aa0
--- /dev/null
+++ b/docs/overview/index.mdx
@@ -0,0 +1,42 @@
+---
+sidebar_position: 1
+title: Intro
+hide_title: true
+---
+
+# Overview
+
+Otterize is a declarative and zero-trust approach to access management that empowers you to streamline workload IAM while ensuring maximum security.
+
+## How does Otterize work?
+
+Otterize is deployed to Kubernetes using a Helm chart that deploys the core open-source components: the [network mapper](https://github.com/otterize/network-mapper), [intents-operator](https://github.com/otterize/intents-operator), and [credentials-operator](https://github.com/otterize/credentials-operator). These components each have a stand-alone function (mapping access, provisioning policies and provisioning credentials, respectively), but together they can automate workload IAM.
+
+### Network mapper
+
+The network mapper is a zero-config open-source tool that provides insights into your workload traffic without modifying your code or adding additional layers. Once Otterize is installed, the network mapper will automatically inspect pod traffic metadata to create a map of accesses, including:
+* Pod-to-pod traffic
+* Internet egress traffic
+* Pod-to-pod traffic including the URL and HTTP method, when an Istio sidecar is available
+* Kafka topics
+* PostgreSQL databases and tables
+* AWS resources
+
+Out of the box, only pod-to-pod, Internet egress and Istio traffic are collected. To enable the rest, see the tutorials.
+
+This information can then be viewed as a graph, exported textually, or used to automatically generate ClientIntents, a resource used with the intents operator.
+
+For more information, visit the [network mapper reference page](/reference/configuration/network-mapper).
+
+### Credentials operator
+
+The credentials operator handles the provisioning of just-in-time credentials for workloads to authenticate. It can issue mTLS credentials or database username + password pairs as Kubernetes Secrets, or create AWS IAM roles. To learn about each of these, check the respective tutorials.
+
+For more information, visit the [credentials operator reference page](/reference/configuration/credentials-operator).
+
+### Intents operator
+
+The intents operator handles the provisioning of just-in-time policies based on the declared ClientIntents of workloads within a cluster. It can manage network policies, Istio authorization policies, AWS IAM policies, PostgreSQL GRANTs, and Kafka ACLs. To learn about each of these, visit the respective tutorial.
+The intents operator does not implement its own policies, but instead configures your existing infrastructure's policies to allow the access required by each workload. This means that it does not ever see your data.
+
+For more information, visit the [intents operator reference page](/reference/configuration/intents-operator).
diff --git a/docs/installation/README.mdx b/docs/overview/installation/README.mdx
similarity index 65%
rename from docs/installation/README.mdx
rename to docs/overview/installation/README.mdx
index 61dcb078a..b77a24199 100644
--- a/docs/installation/README.mdx
+++ b/docs/overview/installation/README.mdx
@@ -1,35 +1,28 @@
---
-sidebar_position: 5
+sidebar_position: 2
title: Installation
---
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
## Install Otterize without Otterize Cloud (OSS only)
-{@include: ../_common/install-otterize.md}
-
-
-
-
- If you are installing Otterize for network policies, make sure your cluster supports network policies.
- Expand to see how.
-
-
-Before you start, you need to have a Kubernetes cluster with a [CNI](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) that supports [NetworkPolicies](https://kubernetes.io/docs/concepts/services-networking/network-policies/).
-
-{@include: ../_common/cluster-setup.md}
-
+{@include: ../../_common/install-otterize.md}
### Upgrade Otterize
-{@include: ../_common/upgrade-otterize.md}
+{@include: ../../_common/upgrade-otterize.md}
-## Connect Otterize OSS to Otterize Cloud, or install Otterize with Otterize Cloud
-To connect Otterize OSS to Otterize Cloud you will need to [login](https://app.otterize.com), go to [integrations](https://app.otterize.com/integrations), create a Kubernetes integration, and follow the instructions.
+## Install Otterize with Otterize Cloud
-In a nutshell, you need to `helm upgrade` the same Helm chart, but provide Otterize Cloud credentials. Upon creating a Kubernetes integration, a guide will appear that walks you through doing this with the new credentials just created.
+To connect Otterize OSS to Otterize Cloud you will need to [log in](https://app.otterize.com) and create an integration, then follow the instructions, which generate unique credentials for your cluster to communicate with Otterize Cloud.
## Install just the Otterize network mapper
-{@include: ../_common/install-otterize-network-mapper.md}
+{@include: ../../_common/install-otterize-network-mapper.md}
+
+## Create a cluster with support for network policies
+Before you start, you need to have a Kubernetes cluster with a [CNI](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) that supports [NetworkPolicies](https://kubernetes.io/docs/concepts/services-networking/network-policies/).
+
+{@include: ../../_common/cluster-setup.md}
+---
## Install the Otterize CLI
@@ -37,7 +30,7 @@ The [Otterize CLI](/reference/cli) is a command-line utility used to control and
To install the CLI:
-{@include: ../_common/install-otterize-cli.md}
+{@include: ../../_common/install-otterize-cli.md}
## Uninstall Otterize
diff --git a/docs/overview/intent-based-access-control.mdx b/docs/overview/intent-based-access-control.mdx
new file mode 100644
index 000000000..2bbdf41fc
--- /dev/null
+++ b/docs/overview/intent-based-access-control.mdx
@@ -0,0 +1,42 @@
+---
+sidebar_position: 2
+title: Intent-Based Access Control (IBAC)
+---
+
+## Why intent based access?
+
+We developers are working hard to make the world’s services functional, reliable, performant, and of course secure, all while maximizing velocity. In practice, achieving successful zero-trust security requires enabling stringent access policies on the service we are developing and within the other technologies and services we utilize.
+
+Services may need network access, database access, cloud resource access, and more to achieve the desired functionality. In a zero-trust environment, access must be granted by the data teams, cloud teams, and other teams managing dependent services. This results in a large degree of friction and a lack of a cohesive picture of the access rights needed for each service to function properly.
+
+We believe that each service should define the access it needs to function. This intent-based access control (IBAC) should be easily understood, easily reviewed, and capable of being statically analyzed.
+
+## Enter client intents
+
+A client intents file is simply a list of calls to servers a client intends to make. Coupled with a mechanism for resolving service names, the list of client intents can be translated to different authorization mechanisms, such as network policies, cloud IAM, databases, etc.
+
+In other words, developers declare what their service intends to access, and that can then be converted to a network policy and the associated set of pod labels.
+
+Here’s an example of a client intents file (as a Kubernetes custom resource YAML) for a service named **client** that has network access to another service named **auth-server** and has access to **production-db’s** **metrics** database:
+```yaml
+
+apiVersion: k8s.otterize.com/v1alpha2
+kind: ClientIntents
+metadata:
+ name: client-intents
+
+spec:
+ service:
+ name: client
+ calls:
+ - name: auth-server
+ - name: production-db
+ type: database
+ databaseResources:
+ - databaseName: metrics
+```
+
+## How do intents work?
+When intents are created for a client, the intents operator automatically creates, updates, and deletes the corresponding policies and automatically labels client and server pods to reflect precisely the client-to-server calls declared in client intents files. For instance, for a NetworkPolicy, a single policy is created per server, and pod labels are dynamically updated for clients when their intents are updated
+
+Service names are resolved by recursively getting the owner of a pod until the original owner is found, usually a Deployment, StatefulSet, or other such resource. The name of that resource is used unless the pod has a service-name annotation, in which case the value of that annotation is used instead.
diff --git a/docs/otterize-cloud/README.mdx b/docs/overview/otterize-cloud/README.mdx
similarity index 100%
rename from docs/otterize-cloud/README.mdx
rename to docs/overview/otterize-cloud/README.mdx
diff --git a/docs/otterize-cloud/_category_.json b/docs/overview/otterize-cloud/_category_.json
similarity index 100%
rename from docs/otterize-cloud/_category_.json
rename to docs/overview/otterize-cloud/_category_.json
diff --git a/docs/otterize-cloud/_environments_and_namespaces.mdx b/docs/overview/otterize-cloud/_environments_and_namespaces.mdx
similarity index 100%
rename from docs/otterize-cloud/_environments_and_namespaces.mdx
rename to docs/overview/otterize-cloud/_environments_and_namespaces.mdx
diff --git a/docs/otterize-oss/README.mdx b/docs/overview/otterize-oss/README.mdx
similarity index 60%
rename from docs/otterize-oss/README.mdx
rename to docs/overview/otterize-oss/README.mdx
index 5436c09dd..9397ada18 100644
--- a/docs/otterize-oss/README.mdx
+++ b/docs/overview/otterize-oss/README.mdx
@@ -30,17 +30,3 @@ To get started with Otterize OSS, see the tutorials for [network policies](/quic
Components in Otterize OSS collect usage information — counts of events like `INTENTS_APPLIED`, `NETWORK_POLICY_CREATED`, `KAFKA_ACL_DELETED`, etc. — and can report those back to the Otterize team. This is entirely optional and does not affect the functionality of Otterize OSS, but it does help the team at Otterize understand what the community finds useful and hence how to improve it. (Of course, direct feedback through the [Otterize Community Slack](https://joinslack.otterize.com/) is very much appreciated too.) For more information, including what is sent and how to turn it off or on, see [the usage telemetry documentation](/otterize-oss/usage-telemetry).
-## Roadmap
-
-The near-term roadmap for Otterize OSS currently includes:
-- [[Done](https://github.com/otterize/otterize-cli/releases/tag/v0.1.17)] Adding **network map visualization** capabilities to the Otterize CLI, so you can get network map images from the network mapper.
-
-- [[Done](https://github.com/otterize/network-mapper/releases/tag/v0.1.16)] Adding a **Kafka watcher** to supply more detailed information to the network mapper about calls to any Kafka server: which clients are performing which operations against which topics. This complements the current map built up in the network mapper, which only records which clients called which servers, without any more granular information about those calls. With this new capability, users can bootstrap client intents that contain granular Kafka access intent information, and Otterize Cloud can display topic-level shadow mode information and insights also for Kafka servers and their clients.
-
-- [[Done](https://github.com/otterize/intents-operator/releases/tag/v0.1.20)] Adding support for Istio service mesh access controls. This includes:
- - Pod-to-pod access controls (akin to the current network policies support).
- - Granular HTTP-level (path and method) access controls (akin to the current Kafka topic-level access control support).
- - [Adding HTTP-level (path and method) intent information to the network mapper](https://github.com/otterize/network-mapper/releases/tag/v0.1.16), and to the information optionally sent to Otterize Cloud to support [shadow mode](/shadow-vs-active-enforcement) in the access graph.
-
-
-
diff --git a/docs/otterize-oss/_category_.json b/docs/overview/otterize-oss/_category_.json
similarity index 100%
rename from docs/otterize-oss/_category_.json
rename to docs/overview/otterize-oss/_category_.json
diff --git a/docs/otterize-oss/error-telemetry.mdx b/docs/overview/otterize-oss/error-telemetry.mdx
similarity index 100%
rename from docs/otterize-oss/error-telemetry.mdx
rename to docs/overview/otterize-oss/error-telemetry.mdx
diff --git a/docs/otterize-oss/usage-telemetry.mdx b/docs/overview/otterize-oss/usage-telemetry.mdx
similarity index 100%
rename from docs/otterize-oss/usage-telemetry.mdx
rename to docs/overview/otterize-oss/usage-telemetry.mdx
diff --git a/docs/quickstart/_category_.json b/docs/quickstart/_category_.json
deleted file mode 100644
index 69cd49661..000000000
--- a/docs/quickstart/_category_.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "label": "Quickstart",
- "position": 1,
- "collapsed": false
-}
diff --git a/docs/quickstart/access-control/_category_.json b/docs/quickstart/access-control/_category_.json
deleted file mode 100644
index 58976741d..000000000
--- a/docs/quickstart/access-control/_category_.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "label": "Access control",
- "position": 1,
- "collapsed": false
-}
diff --git a/docs/intent-based-access-control/README.mdx b/docs/reference/IBAC-Overview.mdx
similarity index 100%
rename from docs/intent-based-access-control/README.mdx
rename to docs/reference/IBAC-Overview.mdx
diff --git a/docs/reference/_category_.json b/docs/reference/_category_.json
index 8c87aa627..323775976 100644
--- a/docs/reference/_category_.json
+++ b/docs/reference/_category_.json
@@ -1,5 +1,5 @@
{
"label": "Reference",
"position": 11,
- "collapsed": false
+ "collapsed": true
}
diff --git a/docs/reference/cli/README.mdx b/docs/reference/cli/README.mdx
index ef863a693..2a4cc425a 100644
--- a/docs/reference/cli/README.mdx
+++ b/docs/reference/cli/README.mdx
@@ -4,14 +4,14 @@ title: CLI
---
The Otterize command line interface (CLI) offers the following capabilities:
-- [Interact with](#network-mapper) the [Otterize network mapper](/quickstart/visualization/k8s-network-mapper) running in a Kubernetes cluster.
+- [Interact with](#network-mapper) the [Otterize network mapper](/features/network-mapping-network-policies/tutorials/k8s-network-mapper) running in a Kubernetes cluster.
- [Transform](#otterize-intents-convert--f-path) [intents files](/reference/intents-and-intents-files/#intents-file-formats) from plain YAML format to Kubernetes custom resource YAML format.
- Interact with the Otterize Cloud, through its REST API.
This CLI is open-source software. To see its source or build it yourself,
see [https://github.com/otterize/otterize-cli](https://github.com/otterize/otterize-cli).
-The CLI is available as an installable binary as documented in this [guide](/installation#install-the-otterize-cli).
+The CLI is available as an installable binary as documented in this [guide](/overview/installation#install-the-otterize-cli).
The following are the commands offered by the Otterize CLI.
@@ -38,7 +38,7 @@ To **disable** sending usage information:
If the `telemetry` flag is omitted or set to `true`, telemetry will be enabled: usage information will be reported.
-For more information see the [Usage telemetry Documentation](/otterize-oss/usage-telemetry)
+For more information see the [Usage telemetry Documentation](/overview/otterize-oss/usage-telemetry)
## Global options
diff --git a/docs/reference/configuration/credentials-operator/README.mdx b/docs/reference/configuration/credentials-operator/README.mdx
index 8efd28c35..2c0a1f329 100644
--- a/docs/reference/configuration/credentials-operator/README.mdx
+++ b/docs/reference/configuration/credentials-operator/README.mdx
@@ -3,7 +3,7 @@ sidebar_position: 3
title: Credentials operator
---
-The Otterize credentials operator automatically resolves pods to dev-friendly service names and provisions credentials (certificates) for the services from cert-manager, Otterize Cloud or SPIRE as Kubernetes Secrets.
+The credentials operator provisions just-in-time credentials for workloads running on Kubernetes. These credentials come in the form of Kubernetes Secrets (mTLS certificates, database username + passwords), or AWS IAM roles.
## Deploying the credentials operator
To deploy the operator, [use the Helm chart](/reference/configuration/credentials-operator/helm-chart).
@@ -11,34 +11,41 @@ To deploy the operator, [use the Helm chart](/reference/configuration/credential
To deploy with Otterize Cloud as the certificate provider, we recommend you [follow the instructions in Otterize Cloud](https://app.otterize.com/).
To deploy with cert-manager as the certificate provider, you must also [configure the Issuer name and whether it should look for a ClusterIssuer or an Issuer (namespace-scoped)](/reference/configuration/credentials-operator/helm-chart#cert-manager-parameters).
-## Acquiring mTLS credentials using the credentials operator
-The credentials operator is controlled using annotations placed on pods. To have it provision credentials and place them in Secrets, you must specify `credentials-operator.otterize.com/tls-secret-name`.
+## Provisioning AWS IAM roles using the credentials operator
+The credentials operator is controlled using annotations placed on pods. To have it provision an AWS IAM role, you must specify the pod annotation `credentials-operator.otterize.com/create-aws-role`, with the value being `true`. Once you do so, the credentials operator will provision an AWS IAM role, and automatically bind it with the Kubernetes ServiceAccount of the pod by setting the EKS role ARN annotation on the ServiceAccount and the appropriate Trust Relationship on the AWS IAM role.
-## How does the credentials operator provision credentials?
+## Provisioning mTLS certificates using the credentials operator
+The credentials operator is controlled using annotations placed on pods. To have it provision certificates and place them in Secrets, you must specify the pod annotation `credentials-operator.otterize.com/tls-secret-name`, with the value being the name of the secret. Once you do so, the credentials operator will provision a certificate for the pod, using Otterize Cloud, cert-manager or SPIRE, depending on how you've deployed it.
+
+## Provisioning database username + password using the credentials operator
+The credentials operator is controlled using annotations placed on pods. To have it provision database username + password and place them in Secrets, you must specify the pod annotation `credentials-operator.otterize.com/user-password-secret-name`, with the value being the name of the secret. Once you do so, the credentials operator will provision a username and password for the pod.
+
+
+### How does the credentials operator provision certificates?
The credentials operator performs two steps in order to issue certificates.
-### Step 1: SPIRE entry registration
+#### Step 1: SPIRE entry registration
This step only happens if the operator is configured to use SPIRE for certificate generation. Once the operator [resolves the service name](#service-name-resolution-and-automatic-pod-labeling) for a pod, it labels the pod so that SPIRE can find it, and registers an entry with the SPIRE server for that label.
-### Step 2: Certificate generation
+#### Step 2: Certificate generation
The operator consults the annotation `credentials-operator.otterize.com/tls-secret-name`. If that annotation exists, the operator creates a secret named after the value of the label. That secret contains X.509 credentials within, provided by cert-manager, Otterize Cloud or SPIRE, depending on how the credentials operator is configured.
-#### cert-manager
+##### cert-manager
The operator creates a cert-manager [`Certificate`](https://cert-manager.io/docs/usage/certificate/) resource, which will create a Kubernetes Secret with the name specified by the value of the annotation `credentials-operator.otterize.com/tls-secret-name`. The common name and DNS names in the certificate are values that represent the identity of the service, as resolved by the [service identity resolution algorithm](#3-service-name-resolution-and-automatic-pod-labeling), i.e. `servicename.namespace`.
The operator will use a [`ClusterIssuer`](https://cert-manager.io/docs/concepts/issuer/) or an [`Issuer`](https://cert-manager.io/docs/concepts/issuer/) to create the Certificate resource, which it expects to find in the same namespace as the `Pod` with the annotation. The `Issuer` is configured at deploy time, using the [Helm chart](/reference/configuration/credentials-operator/helm-chart).
In the event that the default approver controller in `cert-manager` is [disabled](https://cert-manager.io/docs/concepts/certificaterequest/#approver-controller), the credentials operator can auto-approve its own [`CertificateRequests`](https://cert-manager.io/docs/concepts/certificaterequest/). Enable this capability by [configuring the Helm chart `autoApprove` flag](/reference/configuration/credentials-operator/helm-chart#cert-manager-parameters).
-#### Otterize Cloud
+##### Otterize Cloud
The operator requests certificates from Otterize Cloud, which internally manages them in Hashicorp Vault. The certificates are then placed within a Kubernetes Secret named with the value of the annotation `credentials-operator.otterize.com/tls-secret-name`.
-#### SPIRE
+##### SPIRE
Once the operator has registered the pod with SPIRE — which happens automatically upon pod startup for a pod that has the `credentials-operator.otterize.com/tls-secret-name` annotation — it acquires the SVID and certificates for the CA chain and places them within a Kubernetes Secret. The SVID and DNS names in the certificate are the identity of the service, as resolved by the [service identity resolution algorithm](#3-service-name-resolution-and-automatic-pod-labeling), i.e. `servicename.namespace`.
## SPIRE workload registrar
-When using SPIRE, the operator registers every pod with the SPIRE server (even those without annotations).
+When deployed with a SPIRE server, the operator registers every pod with the SPIRE server (even those without annotations).
Alongside the credentials operator, you could use SPIRE agents and the SPIRE SDK to work with the same SPIRE server.
To learn more, check out the documentation for [SPIRE](https://spiffe.io/docs/latest/spire-about/spire-concepts/). Note that to use the credentials operator, you do not need to work directly with SPIRE or SPIRE agents, and can do everything completely using annotations and Kubernetes Secrets.
diff --git a/docs/reference/configuration/intents-operator/README.mdx b/docs/reference/configuration/intents-operator/README.mdx
index d8543be88..c980d1fba 100644
--- a/docs/reference/configuration/intents-operator/README.mdx
+++ b/docs/reference/configuration/intents-operator/README.mdx
@@ -78,7 +78,7 @@ it is attempting to read, so the end result is that the topic ACLs determine act
### PostgreSQL users & access
The intents operator automatically creates, and updates credentials in PostgreSQL databases according to the declared intents. It works together with the Otterize credentials operator to easily enable secure access to PostgreSQL from client pods, all in your Kubernetes cluster.
-Try the [Just-in-time PostgreSQL users & access](https://docs.otterize.com/quickstart/access-control/postgresql) tutorial to learn more.
+Try the [Just-in-time PostgreSQL users & access](/features/postgresql/tutorials/postgres) tutorial to learn more.
### Istio AuthorizationPolicy
The intents operator automatically creates, updates and deletes Istio authorization policies, automatically looks up service accounts for client pods and labels server pods, to reflect precisely the client-to-server calls declared in client intents files.
diff --git a/docs/reference/intents-and-intents-files/README.mdx b/docs/reference/intents-and-intents-files/README.mdx
deleted file mode 100644
index 9f7e76924..000000000
--- a/docs/reference/intents-and-intents-files/README.mdx
+++ /dev/null
@@ -1,87 +0,0 @@
----
-sidebar_position: 4
-title: Intents and intents files
----
-
-Intent-based access control is, not surprisingly, centered around declaring intents — specifically, declaring **client** intents
- to call servers.
-
-The mechanism to declare client intents is with **client intents files**, or just "intents files" for short.
-This is a natural approach for agile, cloud-native organizations and initiatives:
-
-- Intents files are **declarative**;
-- Specifically, intents files declare **what** needs to happen (service A needs to access service B to do operation C)
-without specifying, or needing to know, **how** to accomplish this;
-- Intents files align with **rapid, distributed development** because they only require the knowledge that
-client developers already have &mdash no need for the target server developers or admins to keep track of who needs to access them;
-- The declarative approach thrives in **cloud-native infrastructures** where there are existing APIs to configure access control automatically.
-
-## Intents within intents files
-
-An intent is a declaration by a specific client to call a specific server, optionally specifying more granular
- information about the call (e.g. the resource path and method for HTTP, the topic name and operation for Kafka).
- In other words, an intent is a tuple of client, server, and optional granular call information.
- If any of those changes, that's logically a different intent, though the intents file format allows some
- shorthand ways of aggregating intents that only differ by HTTP method or Kafka operation. See the example below.
-
- An client's intents file specifies *all* the intents of that client, in one YAML file. Why is that important?
- Because as the client's needs change, the intents file should change with it, and any intents no longer needed
- should be removed from the file. When this updated file is applied, the corresponding access is also removed,
- i.e. the network policies or the Kafka ACLs that were previously in place due to those intents are now gone.
- In this way, access controls always reflect all of, and only, the latest intended access.
-
-## Intents file formats
-
-Client intents files are independent of the infrastructure on which IBAC is deployed — indeed, they abstract away
-any tie-ins with infrastructures and implementations of access control.
-
-As an example, let's look at the core of a client intents file for a service called `checkoutservice`.
-It declares that it will call the `emailservice`, the `orderservice`, and the `ecomm-events` Kafka service.
-It also provides more granular information for some of the calls:
-```yaml
-{@include: ../../../static/resources/example-intents.yaml}
-```
-
-You can actually create and use such "plain" or "vanilla" intents files without any other metadata. Currently, Otterize only supports processing client intents via the Otterize OSS intents operator for Kubernetes, so you'll need to run the plain intents files through the Otterize CLI (`otterize intents convert`) to convert them into Kubernetes custom resource YAML files.
-
-Within the context of a Kubernetes cluster, it's very natural to format intents files from the beginning as
-Kubernetes custom resources. These files are applied (`kubectl apply`) to the Kubernetes API, which validates
- them against a `ClientIntents` custom resource definition (CRD) and then hands them over to the Otterize intents
- operator, as expected of a Kubernetes ecosystem extension.
- The two formats are trivially related: the "plain" intents file contents are simply embedded in the `spec`
- section of the custom resource format.
-
-Here is the same client intents file, now formatted as a Kubernetes custom resource YAML,
-so it can be applied directly via `kubectl apply`:
-
-```yaml
-{@include: ../../../static/resources/example-intents-resource-highlighted.yaml}
-```
-
-## Intents file specification
-
-The core of a client intents file has 2 root-level fields (keys, in YAML):
-- `service` (required): describes the client service that intends to make the calls in this file.
- - `name` (required): specifies the name of the client service.
-
-- `calls` (required): describes the (server) services which the client intends to call.
- Its value is a list of key-value pairs, each describing a server service to be called. (The combination of client service, server service, and optionally any more granular information about the call forms an intent.)
- Each item in the list has the following fields:
- - `name` (required): specifies the name of the server service.
- The name can include the namespace of the server service, if different from the client service, separated by a dot ("."): `server-name.server-namespace`.
- - `type` (optional, case-insensitive): specifies the type of call to be made to the server.
- If present, the values can currently be `http` or `kafka`.
- The Otterize intents operator will manage network policies for all intents, regardless of type (including if no type is specified); it will *also* manage Kafka ACLs if the `type` is `kafka`; and in the future it will *also* manage HTTP access controls if the `type` is `http`.
-
- - `kafkaTopics` (optional; only allowed if `type` is `kafka`): Specifies the list of Kafka topics to be called.
- Each item in the list has the following fields:
- - `name` (required): specifies the name of the topic.
- - `operations` (required): specifies the list of intended operations on that topic.
- Allowed values are: `consume`, `produce`, `create`, `alter`, `delete`, `describe`, `cluster_action`, `describe_configs`, `alter_configs`, `idempotent_write`, and `all`.
-
- - `HTTPResources` (optional; only allowed if `type` is `http`): Specifies the list of HTTP resources to be called.
- Each item in the list has the following fields:
- - `path` (required): specifies the HTTP path of the resource.
- - `methods` (required): specifies the list of intended operations on that topic.
- Allowed values are: `get`, `post`, `put`, `patch`, `delete`, `options`, `trace`, and `connect`.
-
diff --git a/docs/shadow-vs-active-enforcement/README.mdx b/docs/reference/shadow-vs-active-enforcement/README.mdx
similarity index 100%
rename from docs/shadow-vs-active-enforcement/README.mdx
rename to docs/reference/shadow-vs-active-enforcement/README.mdx
diff --git a/docs/reference/terminology/README.mdx b/docs/reference/terminology/README.mdx
index 41a1b82a9..1178da535 100644
--- a/docs/reference/terminology/README.mdx
+++ b/docs/reference/terminology/README.mdx
@@ -17,6 +17,9 @@ The [Otterize CLI](/reference/cli) is a command-line utility used to control and
### Intent (or client intent)
Otterize intents are a way to declare that one service intends to call another service. Otterize uses them to apply authorization rules to enable the calls to go through, and block any unintended calls. An *intent* refers to a client declaring a particular call to a server; *all* a given client's intents to the servers it intends to call are collected in a single *client intents file*. [Learn more about intents](/reference/intents-and-intents-files).
+### Integrations
+Otterize Cloud supports two types of integrations: Kubernetes integrations and generic integrations. Kubernetes integrations connect a Kubernetes cluster to Otterize Cloud, allowing communication with the Otterize operators. Generic integrations connect external systems to Otterize Cloud, providing API/CLI access credentials. These integrations are named based on their usage and must have unique names within an organization.
+
## Identity
### PKI
@@ -57,3 +60,12 @@ A Kubernetes custom resource refers to a resource that is not present in the bas
### CNI (Container Network Interface)
CNI is a CNCF project that provides libraries for implementing plugins for configuring network interfaces in Linux containers, and is used by Kubernetes to provide pods running in a cluster with network connectivity.
Examples of CNI plugins are Calico, Cilium, the AWS VPC CNI plugin. [Read more about Kubernetes CNI plugins here](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/).
+
+### Services
+Services represent the nodes in the access graph. They can be client services, server services, or both, and they are associated with specific namespaces within a Kubernetes cluster. Otterize Cloud infers services from reported intents and provides additional information for Kafka services and certificates. Service names must be unique within a namespace, but not necessarily across the entire cluster or multiple clusters.
+
+### Namespaces and Environments
+Namespaces are used to group related services within a Kubernetes cluster and can be mapped to different environments (e.g., dev, staging, production). Intents can be cross-namespace and cross-environment, and Otterize Cloud associates namespaces with their respective environments. Environment names must be unique within an organization.
+
+### Clusters
+A Kubernetes cluster connected to Otterize Cloud is represented by a cluster object in the cloud. This object contains information about the cluster's intents, services, credentials, and configuration. Multiple clusters and namespaces can belong to a single environment, or environments can span multiple clusters, depending on the organization's needs. Cluster names must be unique within an organization.
\ No newline at end of file
diff --git a/docs/troubleshooting/README.mdx b/docs/reference/troubleshooting/README.mdx
similarity index 100%
rename from docs/troubleshooting/README.mdx
rename to docs/reference/troubleshooting/README.mdx
diff --git a/docusaurus.config.js b/docusaurus.config.js
index 8db8110aa..de800e73b 100644
--- a/docusaurus.config.js
+++ b/docusaurus.config.js
@@ -136,61 +136,89 @@ const config = {
['@docusaurus/plugin-client-redirects',
{
redirects: [
+ {
+ from: ['/getting-started/oss-installation', '/installation'],
+ to: '/overview/installation',
+ },
+ {
+ from: ['/intent-based-access-control', '/reference/intents-and-intents-files'],
+ to: '/overview/intent-based-access-control'
+ },
+ {
+ from: ['/otterize-oss'],
+ to: '/overview/otterize-oss'
+ },
+ {
+ from: ['/reference/access-controls/network-policies'],
+ to: '/features/network-mapping-network-policies/reference/Network-Policies-Deep-Dive'
+ },
+ {
+ from: ['/shadow-vs-active-enforcement'],
+ to: '/reference/shadow-vs-active-enforcement'
+ },
+ {
+ from: ['/otterize-cloud'],
+ to: '/overview/otterize-cloud'
+ },
{
from: '/quick-tutorials',
to: '/',
},
{
- from: '/quick-tutorials/k8s-mtls',
- to: '/quickstart/access-control/k8s-kafka-mtls-cert-manager',
+ from: ['/otterize-oss/usage-telemetry'],
+ to: '/overview/otterize-oss/usage-telemetry'
},
{
- from: '/getting-started/oss-installation',
- to: '/installation',
+ from: ['/otterize-oss/error-telemetry'],
+ to: '/overview/otterize-oss/error-telemetry'
},
{
- from: '/quick-tutorials/k8s-network-policies',
- to: '/quickstart/access-control/k8s-network-policies',
+ from: ['/otterize-cloud/object-model'],
+ to: '/reference/terminology'
},
{
- from: '/quick-tutorials/k8s-istio-authorization-policies',
- to: '/quickstart/access-control/k8s-istio-authorization-policies',
+ from: ['/guides/protect-1-service-network-policies'],
+ to: '/features/network-mapping-network-policies/tutorials/protect-1-service-network-policies'
},
{
- from: '/quick-tutorials/k8s-kafka-mtls',
- to: '/quickstart/access-control/k8s-kafka-mtls',
+ from: ['/quick-tutorials/k8s-kafka-mtls', '/quickstart/access-control/k8s-kafka-mtls'],
+ to: '/features/kafka/tutorials/k8s-kafka-mtls',
},
{
- from: '/quick-tutorials/aws-eks-cni-mini',
- to: '/quickstart/access-control/aws-eks-cni-mini',
+ from: ['/quick-tutorials/aws-eks-cni-mini','/quickstart/access-control/aws-eks-cni-mini'],
+ to: '/features/network-mapping-network-policies/tutorials/aws-eks-cni-mini',
},
{
- from: '/quick-tutorials/k8s-kafka-mtls-cert-manager',
- to: '/quickstart/access-control/k8s-kafka-mtls-cert-manager',
+ from: ['/quick-tutorials/k8s-kafka-mtls-cert-manager', '/quickstart/access-control/k8s-kafka-mtls-cert-manager', '/quick-tutorials/k8s-mtls'],
+ to: '/features/kafka/tutorials/k8s-kafka-mtls-cert-manager',
},
{
- from: '/quick-tutorials/k8s-network-mapper',
- to: '/quickstart/visualization/k8s-network-mapper',
+ from: ['/quick-tutorials/k8s-istio-watcher', '/quickstart/visualization/k8s-istio-watcher'],
+ to: '/features/istio/tutorials/k8s-istio-watcher',
},
{
- from: '/quick-tutorials/k8s-istio-watcher',
- to: '/quickstart/visualization/k8s-istio-watcher',
+ from: ['/quick-visual-tutorials/visual-ibac-istio-authorization-policies','/quickstart/access-control/k8s-istio-authorization-policies', '/quickstart/k8s-istio-authorization-policies'],
+ to: '/features/istio/tutorials/k8s-istio-authorization-policies',
},
{
- from: '/quick-visual-tutorials/visual-ibac-istio-authorization-policies',
- to: '/quickstart/access-control/k8s-istio-authorization-policies',
+ from: ['/quick-visual-tutorials/visual-ibac-kafka-k8s'],
+ to: '/features/kafka/tutorials/k8s-kafka-mapping',
},
{
- from: '/quick-visual-tutorials/visual-ibac-kafka-k8s',
- to: '/quickstart/access-control/k8s-kafka-mtls',
+ from: ['/quickstart/visualization/postgresql'],
+ to: '/features/postgresql/tutorials/postgres-mapping'
},
{
- from: '/quick-visual-tutorials/visual-ibac-network-policies',
- to: '/quickstart/access-control/k8s-network-policies',
+ from: ['/quickstart/access-control/postgresql'],
+ to: '/features/postgresql/tutorials/postgres'
},
{
- from: '/quick-visual-tutorials/visual-k8s-cluster-mapping',
- to: '/quickstart/visualization/k8s-network-mapper',
+ from: ['/quick-visual-tutorials/visual-ibac-network-policies', '/quick-tutorials/k8s-network-policies', '/quickstart/access-control/k8s-network-policies'],
+ to: '/features/network-mapping-network-policies/tutorials/k8s-network-policies',
+ },
+ {
+ from: ['/quick-visual-tutorials/visual-k8s-cluster-mapping', '/quickstart/visualization/k8s-network-mapper', '/quick-tutorials/k8s-network-mapper'],
+ to: '/features/network-mapping-network-policies/tutorials/k8s-network-mapper',
},
// Redirect from multiple old paths to the new path
// {
@@ -389,6 +417,11 @@ const config = {
theme: lightCodeTheme,
darkTheme: darkCodeTheme,
},
+ docs: {
+ sidebar: {
+ autoCollapseCategories: true,
+ },
+ },
colorMode: {
defaultMode: "light",
disableSwitch: true,
diff --git a/package.json b/package.json
index 70dbf3159..d2beb76e9 100644
--- a/package.json
+++ b/package.json
@@ -24,9 +24,11 @@
"clsx": "^1.2.1",
"docusaurus-plugin-hotjar": "^0.0.2",
"docusaurus-plugin-includes": "^1.1.4",
+ "heroicons": "^2.1.1",
"prism-react-renderer": "^1.3.5",
"react": "^17.0.2",
- "react-dom": "^17.0.2"
+ "react-dom": "^17.0.2",
+ "vercel": "^33.4.0"
},
"devDependencies": {
"@docusaurus/module-type-aliases": "^2.4.3",
diff --git a/src/components/CardList/index.js b/src/components/CardList/index.js
new file mode 100644
index 000000000..c7f8a5e8e
--- /dev/null
+++ b/src/components/CardList/index.js
@@ -0,0 +1,29 @@
+import React from 'react';
+
+export default function CardList(props) {
+ console.log(JSON.stringify(props));
+ return (
+