diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index d9329f68ad2e1..acb4f94b6aad6 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,6 +1,6 @@ { "name": "Cilium", - "image": "quay.io/cilium/cilium-builder:dce64df3892bec24c003f97edcb8aae51640d97d@sha256:dbcda5ef242a9f5f138b733dbf22437c4134106dad6127041653f947b81e5367", + "image": "quay.io/cilium/cilium-builder:f5e2620e352d7bc93f5fa2e2309786102e73c014@sha256:5afb2abd71a4725003c967ee47d9555df10da1e754c07ffc48dfa8e615b83fd5", "workspaceFolder": "/go/src/github.com/cilium/cilium", "workspaceMount": "source=${localWorkspaceFolder},target=/go/src/github.com/cilium/cilium,type=bind", "features": { diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml index dbb908ae64986..c3ad54b6e7863 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yaml +++ b/.github/ISSUE_TEMPLATE/bug_report.yaml @@ -24,11 +24,11 @@ body: **NOTE: If your version is NOT listed then please upgrade before opening the GH issue. Thank you** options: # renovate: datasource=github-tags depName=cilium/cilium - - 'equal or higher than v1.18.1 and lower than v1.19.0' + - 'equal or higher than v1.18.2 and lower than v1.19.0' # renovate: datasource=github-tags depName=cilium/cilium - 'equal or higher than v1.17.7 and lower than v1.18.0' # renovate: datasource=github-tags depName=cilium/cilium - - 'equal or higher than v1.16.13 and lower than v1.17.0' + - 'equal or higher than v1.16.14 and lower than v1.17.0' validations: required: true - type: textarea diff --git a/.github/actions/e2e/configs.yaml b/.github/actions/e2e/configs.yaml index ac9cf3117d788..9518263c99315 100644 --- a/.github/actions/e2e/configs.yaml +++ b/.github/actions/e2e/configs.yaml @@ -8,7 +8,7 @@ misc: 'bpfClockProbe=false,cni.uninstall=false,tls.readSecretsOnlyFromSecretsNamespace=false,tls.secretSync.enabled=false' - name: '2' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '5.10-20250812.093650' + kernel: '5.10-20250917.183741' kube-proxy: 'iptables' kpr: 'false' tunnel: 'disabled' @@ -16,7 +16,7 @@ misc: 'bpfClockProbe=false,cni.uninstall=false,tls.readSecretsOnlyFromSecretsNamespace=false,tls.secretSync.enabled=false' - name: '3' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '5.10-20250812.093650' + kernel: '5.10-20250917.183741' kube-proxy: 'iptables' kpr: 'false' tunnel: 'disabled' @@ -24,7 +24,7 @@ kvstore: 'true' - name: '4' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '5.10-20250812.093650' + kernel: '5.10-20250917.183741' kube-proxy: 'iptables' kpr: 'true' devices: '{eth0,eth1}' @@ -36,7 +36,7 @@ ingress-controller: 'true' - name: '5' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '5.15-20250812.093650' + kernel: '5.15-20250917.183741' kube-proxy: 'iptables' kpr: 'true' devices: '{eth0,eth1}' @@ -49,7 +49,7 @@ ingress-controller: 'true' - name: '6' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '6.1-20250812.093650' + kernel: '6.1-20250917.183741' kube-proxy: 'none' kpr: 'true' devices: '{eth0,eth1}' @@ -63,7 +63,7 @@ bgp-control-plane: 'true' - name: '7' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '6.12-20250812.093650' + kernel: '6.12-20250917.183741' kube-proxy: 'none' kpr: 'true' devices: '{eth0,eth1}' @@ -78,7 +78,7 @@ misc: 'bpfClockProbe=false,cni.uninstall=false,tls.secretsBackend=k8s,tls.secretSync.enabled=true' - name: '8' # renovate: 
datasource=docker depName=quay.io/lvh-images/kind - kernel: '6.12-20250812.093650' + kernel: '6.12-20250917.183741' kube-proxy: 'iptables' kpr: 'false' tunnel: 'geneve' @@ -89,7 +89,7 @@ skip-include-conn-disrupt-test-ns-traffic: 'true' - name: '9' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '5.10-20250812.093650' + kernel: '5.10-20250917.183741' kube-proxy: 'iptables' kpr: 'true' devices: '{eth0,eth1}' @@ -103,7 +103,7 @@ ingress-controller: 'true' - name: '10' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '5.15-20250812.093650' + kernel: '5.15-20250917.183741' kube-proxy: 'iptables' kpr: 'false' tunnel: 'disabled' @@ -116,7 +116,7 @@ kvstore: 'true' - name: '11' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '6.1-20250812.093650' + kernel: '6.1-20250917.183741' kube-proxy: 'none' kpr: 'true' devices: '{eth0,eth1}' @@ -132,7 +132,7 @@ node-local-dns: 'true' - name: '12' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '6.12-20250812.093650' + kernel: '6.12-20250917.183741' kube-proxy: 'none' kpr: 'true' devices: '{eth0,eth1}' @@ -157,7 +157,7 @@ # explains why 5.4 might cause north-south-loadbalancing tests to # fail. # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '5.15-20250812.093650' + kernel: '5.15-20250917.183741' kube-proxy: 'iptables' kpr: 'true' devices: '{eth0,eth1}' @@ -169,7 +169,7 @@ ingress-controller: 'true' - name: '15' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '6.12-20250812.093650' + kernel: '6.12-20250917.183741' kube-proxy: 'none' kpr: 'true' devices: '{eth0,eth1}' @@ -181,7 +181,7 @@ ciliumendpointslice: 'true' - name: '16' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '5.15-20250812.093650' + kernel: '5.15-20250917.183741' kube-proxy: 'none' kpr: 'true' devices: '{eth0,eth1}' @@ -195,7 +195,7 @@ ingress-controller: 'true' - name: '17' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '6.12-20250812.093650' + kernel: '6.12-20250917.183741' misc: 'bpf.datapathMode=netkit,enableIPv4BIGTCP=true,enableIPv6BIGTCP=true' kube-proxy: 'none' kpr: 'true' @@ -206,7 +206,7 @@ ingress-controller: 'true' - name: '18' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '6.12-20250812.093650' + kernel: '6.12-20250917.183741' misc: 'bpf.datapathMode=netkit-l2,enableIPv4BIGTCP=true,enableIPv6BIGTCP=true' kube-proxy: 'none' kpr: 'true' @@ -217,7 +217,7 @@ ingress-controller: 'true' - name: '19' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '6.12-20250812.093650' + kernel: '6.12-20250917.183741' misc: 'bpf.datapathMode=netkit' kube-proxy: 'none' kpr: 'true' @@ -228,7 +228,7 @@ kvstore: 'true' - name: '20' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '6.12-20250812.093650' + kernel: '6.12-20250917.183741' misc: 'bpf.datapathMode=netkit-l2' kube-proxy: 'none' kpr: 'true' @@ -238,7 +238,7 @@ ingress-controller: 'true' - name: '21' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '6.12-20250812.093650' + kernel: '6.12-20250917.183741' misc: 'bpf.datapathMode=netkit' kube-proxy: 'none' kpr: 'true' @@ -248,7 +248,7 @@ ingress-controller: 'true' - name: '22' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '6.12-20250812.093650' + kernel: '6.12-20250917.183741' misc: 'bpf.datapathMode=netkit-l2' kube-proxy: 'none' kpr: 'true' @@ -258,7 +258,7 @@ ingress-controller: 'true' - 
name: '23' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '6.12-20250812.093650' + kernel: '6.12-20250917.183741' misc: 'bpf.datapathMode=netkit' kube-proxy: 'none' kpr: 'true' @@ -270,7 +270,7 @@ host-fw: 'true' - name: '24' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '6.12-20250812.093650' + kernel: '6.12-20250917.183741' misc: 'bpf.datapathMode=netkit,enableIPv4BIGTCP=true,enableIPv6BIGTCP=true' kube-proxy: 'none' kpr: 'true' @@ -283,7 +283,7 @@ ingress-controller: 'true' - name: '25' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '6.12-20250812.093650' + kernel: '6.12-20250917.183741' misc: 'bpf.datapathMode=netkit-l2,enableIPv4BIGTCP=true,enableIPv6BIGTCP=true' kube-proxy: 'none' kpr: 'true' @@ -296,7 +296,7 @@ ingress-controller: 'true' - name: '26' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '6.6-20250812.093650' + kernel: '6.6-20250917.183741' kube-proxy: 'none' kpr: 'true' devices: '{eth0,eth1}' @@ -307,7 +307,7 @@ ingress-controller: 'true' - name: '27' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '6.6-20250812.093650' + kernel: '6.6-20250917.183741' kube-proxy: 'none' kpr: 'true' devices: '{eth0,eth1}' @@ -318,7 +318,7 @@ ingress-controller: 'true' - name: '28' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '6.6-20250812.093650' + kernel: '6.6-20250917.183741' kube-proxy: 'none' kpr: 'true' devices: '{eth0,eth1}' @@ -330,7 +330,7 @@ ingress-controller: 'true' - name: '29' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '6.12-20250812.093650' + kernel: '6.12-20250917.183741' kube-proxy: 'none' kpr: 'true' devices: '{eth0,eth1}' @@ -343,7 +343,7 @@ skip-upgrade: 'true' - name: '30' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '6.12-20250812.093650' + kernel: '6.12-20250917.183741' kube-proxy: 'none' kpr: 'true' devices: '{eth0,eth1}' @@ -354,7 +354,7 @@ ingress-controller: 'true' - name: '31' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '6.12-20250812.093650' + kernel: '6.12-20250917.183741' kube-proxy: 'none' kpr: 'true' devices: '{eth0,eth1}' @@ -366,7 +366,7 @@ ingress-controller: 'true' - name: '32' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '5.15-20250812.093650' + kernel: '5.15-20250917.183741' kube-proxy: 'none' kpr: 'true' devices: '{eth0,eth1}' @@ -378,7 +378,7 @@ skip-upgrade: 'true' - name: '33' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '6.12-20250812.093650' + kernel: '6.12-20250917.183741' kube-proxy: 'iptables' kpr: 'false' ipv6: 'true' diff --git a/.github/actions/e2e/ipsec_configs.yaml b/.github/actions/e2e/ipsec_configs.yaml index dffadd2402cfc..d50d634dabaa9 100644 --- a/.github/actions/e2e/ipsec_configs.yaml +++ b/.github/actions/e2e/ipsec_configs.yaml @@ -9,7 +9,7 @@ key-two: 'rfc4106-gcm-aes' - name: 'ipsec-2' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '5.10-20250812.093650' + kernel: '5.10-20250917.183741' kube-proxy: 'iptables' kpr: 'false' tunnel: 'disabled' @@ -18,7 +18,7 @@ key-two: 'cbc-aes-sha256' - name: 'ipsec-3' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '5.10-20250812.093650' + kernel: '5.10-20250917.183741' kube-proxy: 'iptables' kpr: 'false' tunnel: 'disabled' @@ -29,7 +29,7 @@ kvstore: 'true' - name: 'ipsec-4' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: 
'6.12-20250812.093650' + kernel: '6.12-20250917.183741' kube-proxy: 'iptables' kpr: 'false' tunnel: 'geneve' @@ -40,7 +40,7 @@ kvstore: 'true' - name: 'ipsec-5' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '5.10-20250812.093650' + kernel: '5.10-20250917.183741' kube-proxy: 'none' kpr: 'true' devices: '{eth0,eth1}' @@ -51,7 +51,7 @@ key-two: 'cbc-aes-sha256' - name: 'ipsec-6' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '5.10-20250812.093650' + kernel: '5.10-20250917.183741' kube-proxy: 'none' kpr: 'true' devices: '{eth0,eth1}' @@ -65,7 +65,7 @@ key-two: 'rfc4106-gcm-aes' - name: 'ipsec-7' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '5.15-20250812.093650' + kernel: '5.15-20250917.183741' kube-proxy: 'iptables' kpr: 'false' tunnel: 'vxlan' @@ -76,7 +76,7 @@ skip-upgrade: 'true' - name: 'ipsec-8' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '6.12-20250812.093650' + kernel: '6.12-20250917.183741' kube-proxy: 'none' kpr: 'true' devices: '{eth0,eth1}' @@ -89,7 +89,7 @@ key-two: 'rfc4106-gcm-aes' - name: 'ipsec-9' # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: '6.12-20250812.093650' + kernel: '6.12-20250917.183741' kube-proxy: 'none' kpr: 'true' devices: '{eth0,eth1}' diff --git a/.github/actions/eks/k8s-versions.yaml b/.github/actions/eks/k8s-versions.yaml index 00bef4560f321..9530629d6128a 100644 --- a/.github/actions/eks/k8s-versions.yaml +++ b/.github/actions/eks/k8s-versions.yaml @@ -9,10 +9,8 @@ include: - version: "1.32" region: ca-central-1 default: true - ipsec: true kpr: true aws-eni-pd: true - version: "1.33" region: us-east-1 - ipsec: true default: true diff --git a/.github/actions/ginkgo/main-focus.yaml b/.github/actions/ginkgo/main-focus.yaml index 4afd0bbfcbb6f..d39f17bc615c0 100644 --- a/.github/actions/ginkgo/main-focus.yaml +++ b/.github/actions/ginkgo/main-focus.yaml @@ -247,7 +247,7 @@ exclude: - k8s-version: "1.32" focus: "f11-datapath-service-ns-tc" - - k8s-version: "1.28" + - k8s-version: "1.32" focus: "f12-datapath-service-ns-misc" - k8s-version: "1.32" diff --git a/.github/actions/ginkgo/main-k8s-versions.yaml b/.github/actions/ginkgo/main-k8s-versions.yaml index 8698280be8030..8c674a2494f29 100644 --- a/.github/actions/ginkgo/main-k8s-versions.yaml +++ b/.github/actions/ginkgo/main-k8s-versions.yaml @@ -6,7 +6,7 @@ include: # renovate: datasource=docker kube-image: "quay.io/cilium/kindest-node:v1.34.0@sha256:7416a61b42b1662ca6ca89f02028ac133a309a2a30ba309614e8ec94d976dc5a" # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: "6.12-20250812.093650" + kernel: "6.12-20250917.183741" kernel-type: "latest" - k8s-version: "1.33" @@ -30,5 +30,5 @@ include: # renovate: datasource=docker kube-image: "quay.io/cilium/kindest-node:v1.31.0@sha256:d2b2a8cd6fa282b9a4126938341a4d2924dfa96f60b1f983d519498c9cde1a99" # renovate: datasource=docker depName=quay.io/lvh-images/kind - kernel: "5.10-20250812.093650" + kernel: "5.10-20250917.183741" kernel-type: "stable" diff --git a/.github/actions/gke/test-config-helm.yaml b/.github/actions/gke/test-config-helm.yaml index 369fc84eea4b4..843858a6e1c7f 100644 --- a/.github/actions/gke/test-config-helm.yaml +++ b/.github/actions/gke/test-config-helm.yaml @@ -6,13 +6,7 @@ config: - type: "tunnel" index: 2 cilium-install-opts: "--datapath-mode=tunnel" - - type: "ipsec" - index: 3 - cilium-install-opts: "--helm-set=encryption.enabled=true --helm-set=encryption.type=ipsec" - - type: "tunnel-ipsec" - 
index: 4 - cilium-install-opts: "--helm-set=encryption.enabled=true --helm-set=encryption.type=ipsec --datapath-mode=tunnel" - type: "tunnel-ingress-controller" - index: 5 + index: 3 cilium-install-opts: "--helm-set=kubeProxyReplacement=true --helm-set=ingressController.enabled=true" nodes: 1 diff --git a/.github/actions/kvstore/action.yaml b/.github/actions/kvstore/action.yaml index 9ca5c9c85c9ee..7640e2786b847 100644 --- a/.github/actions/kvstore/action.yaml +++ b/.github/actions/kvstore/action.yaml @@ -7,7 +7,7 @@ inputs: default: "1" etcd-image: description: "etcd docker image" - default: gcr.io/etcd-development/etcd:v3.6.4@sha256:5d10878e4fd4ebfdf82bc142fb044542a3ca514c0ee169277643a84d6816892a + default: gcr.io/etcd-development/etcd:v3.6.5@sha256:042ef9c02799eb9303abf1aa99b09f09d94b8ee3ba0c2dd3f42dc4e1d3dce534 name: description: "Base name of the etcd containers (to which the index is appended)" default: kvstore diff --git a/.github/actions/set-runtime-image/runtime-image.txt b/.github/actions/set-runtime-image/runtime-image.txt index 4ff19f61a2d12..3688134bc8d1a 100644 --- a/.github/actions/set-runtime-image/runtime-image.txt +++ b/.github/actions/set-runtime-image/runtime-image.txt @@ -1 +1 @@ -quay.io/cilium/cilium-runtime:22e31ca0018cad492dcb99bc08368dc11fac28de@sha256:ae2f5413d4cbea8bfa2c6d05f499b5a6a3512bddf1844bb059e1a8b57b1137de \ No newline at end of file +quay.io/cilium/cilium-runtime:061e77638353be08d1a792ef38b8f4597344139d@sha256:db098a1d65275c3d3f8090706feaab9a30c437fb24942b25ffdea06a010c5bae \ No newline at end of file diff --git a/.github/ariane-config.yaml b/.github/ariane-config.yaml index bee94325e17ad..ee235abed19f7 100644 --- a/.github/ariane-config.yaml +++ b/.github/ariane-config.yaml @@ -38,6 +38,9 @@ triggers: /ci-ipsec-upgrade: workflows: - tests-ipsec-upgrade.yaml + /ci-ipsec: + workflows: + - conformance-ipsec.yaml /ci-ipsec-e2e: workflows: - conformance-ipsec-e2e.yaml diff --git a/.github/workflows/auto-approve.yaml b/.github/workflows/auto-approve.yaml index f2e72af2ba529..b641c0357541e 100644 --- a/.github/workflows/auto-approve.yaml +++ b/.github/workflows/auto-approve.yaml @@ -12,7 +12,8 @@ jobs: runs-on: ubuntu-24.04 if: ${{ github.event.pull_request.user.login == 'cilium-renovate[bot]' && - github.triggering_actor == 'cilium-renovate[bot]' && + (github.triggering_actor == 'cilium-renovate[bot]' || + github.triggering_actor == 'auto-committer[bot]') && github.event.requested_reviewer.login == 'ciliumbot' }} steps: @@ -36,12 +37,13 @@ jobs: - name: Approve PR # Approve the PR if all the following conditions are true: - # - the PR review was requested by renovate bot and - # - the PR was also created by renovate bot + # - the PR was created by renovate bot and + # - the PR review was requested by renovate bot or auto-committer[bot] and # - the requested reviewer was the trusted 'ciliumbot' if: ${{ github.event.pull_request.user.login == 'cilium-renovate[bot]' && - github.triggering_actor == 'cilium-renovate[bot]' && + (github.triggering_actor == 'cilium-renovate[bot]' || + github.triggering_actor == 'auto-committer[bot]') && github.event.requested_reviewer.login == 'ciliumbot' }} env: diff --git a/.github/workflows/build-images-ci.yaml b/.github/workflows/build-images-ci.yaml index c90471c5d32fd..cffdde4b278f1 100644 --- a/.github/workflows/build-images-ci.yaml +++ b/.github/workflows/build-images-ci.yaml @@ -19,16 +19,33 @@ permissions: contents: read # Required to generate OIDC tokens for `sigstore/cosign-installer` authentication id-token: write + # 
To be able to check if base images were built + actions: read concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.event.after || (github.event_name == 'merge_group' && github.run_id) }} cancel-in-progress: true jobs: + wait-for-base-images: + name: Wait for lint checks + uses: ./.github/workflows/wait-for-status-check.yaml + with: + # Only run this job if the event is pull_request_target and if the PR + # is not opened from a fork. + # This is to avoid waiting for base images on push to main or merge_group + # events as the lint-images-base does not run on those events. + if: ${{ github.event_name == 'pull_request_target' && github.event.pull_request.head.repo.full_name == github.event.pull_request.base.repo.full_name }} + sha: ${{ github.event.pull_request.head.sha || github.sha }} + lint-workflows: "lint-images-base.yaml" + timeout-minutes: 2 + poll-interval: 15 + build-and-push-prs: timeout-minutes: 45 name: Build and Push Images runs-on: ${{ vars.GH_RUNNER_EXTRA_POWER_UBUNTU_LATEST || 'ubuntu-24.04' }} + needs: wait-for-base-images outputs: sha: ${{ steps.tag.outputs.sha }} strategy: @@ -382,3 +399,37 @@ jobs: checkout_ref: ${{ needs.build-and-push-prs.outputs.sha }} image_tag: ${{ needs.build-and-push-prs.outputs.sha }} secrets: inherit + + pre-comment: + # Avoid running the "Trigger CI from renovate PRs" environment if we don't need to. + name: Pre-Comment + needs: build-and-push-prs + runs-on: ubuntu-24.04 + if: ${{ + github.event_name == 'pull_request_target' && + github.event.pull_request.user.login == vars.RENOVATE_BOT_USERNAME + }} + steps: + - name: Debug + run: | + echo ${{ github.event.pull_request.user.login }} + echo ${{ github.event.event_name }} + + comment: + name: Post test comment for Renovate PRs after images built + runs-on: ubuntu-24.04 + needs: pre-comment + environment: "Trigger CI from renovate PRs" + if: ${{ + github.event_name == 'pull_request_target' && + github.event.pull_request.user.login == vars.RENOVATE_BOT_USERNAME + }} + steps: + - name: Post /test comment + env: + TOKEN: ${{ secrets.AUTO_COMMENT_TOKEN }} + GITHUB_REPOSITORY: ${{ github.repository }} + PULL_REQUEST_NUMBER: ${{ github.event.pull_request.number }} + run: | + echo ${TOKEN} | gh auth login --with-token + gh pr --repo ${GITHUB_REPOSITORY} comment ${PULL_REQUEST_NUMBER} --body "/test" diff --git a/.github/workflows/build-images-releases.yaml b/.github/workflows/build-images-releases.yaml index 3dc44c88be1b8..cd41cd6a386d0 100644 --- a/.github/workflows/build-images-releases.yaml +++ b/.github/workflows/build-images-releases.yaml @@ -225,6 +225,16 @@ jobs: with: step: "4-post-release" version: ${{ github.ref_name }} + # This is the intended behavior of GitHub Actions. Declaring entries under + # "secrets:" grants the called workflow permission to read + # CILIUM_RELEASE_BOT_PEM and CILIUM_RELEASE_BOT_APP_ID. It does not pass + # literal values here; the values are resolved only in the called + # workflow's scope. If the called workflow sets an environment + # (e.g., environment: TestEnvironment), environment-scoped secrets are + # populated there and become available to its steps. + secrets: + CILIUM_RELEASE_BOT_PEM: ${{ secrets.CILIUM_RELEASE_BOT_PEM }} + CILIUM_RELEASE_BOT_APP_ID: ${{ secrets.CILIUM_RELEASE_BOT_APP_ID }} call-publish-helm: name: Publish Helm Chart @@ -233,3 +243,13 @@ jobs: with: step: "5-publish-helm" version: ${{ github.ref_name }} + # This is the intended behavior of GitHub Actions. 
Declaring entries under + # "secrets:" grants the called workflow permission to read + # CILIUM_RELEASE_BOT_PEM and CILIUM_RELEASE_BOT_APP_ID. It does not pass + # literal values here; the values are resolved only in the called + # workflow's scope. If the called workflow sets an environment + # (e.g., environment: TestEnvironment), environment-scoped secrets are + # populated there and become available to its steps. + secrets: + CILIUM_RELEASE_BOT_PEM: ${{ secrets.CILIUM_RELEASE_BOT_PEM }} + CILIUM_RELEASE_BOT_APP_ID: ${{ secrets.CILIUM_RELEASE_BOT_APP_ID }} diff --git a/.github/workflows/conformance-aks.yaml b/.github/workflows/conformance-aks.yaml index ae61d28909026..cd9168c04299c 100644 --- a/.github/workflows/conformance-aks.yaml +++ b/.github/workflows/conformance-aks.yaml @@ -2,6 +2,31 @@ name: Conformance AKS (ci-aks) # Any change in triggers needs to be reflected in the concurrency group. on: + workflow_call: + inputs: + PR-number: + description: "Pull request number." + required: false + type: string + context-ref: + description: "Context in which the workflow runs. If PR is from a fork, will be the PR target branch (general case). If PR is NOT from a fork, will be the PR branch itself (this allows committers to test changes to workflows directly from PRs)." + required: false + type: string + SHA: + description: "SHA under test (head of the PR branch)." + required: false + type: string + extra-args: + description: "[JSON object] Arbitrary arguments passed from the trigger comment via regex capture group. Parse with 'fromJson(inputs.extra-args).argName' in workflow." + required: false + type: string + default: '{}' + is-workflow-call: + description: "Distinguish if it's a workflow_call event." + required: false + type: boolean + default: true + workflow_dispatch: inputs: PR-number: @@ -17,12 +42,9 @@ on: description: "[JSON object] Arbitrary arguments passed from the trigger comment via regex capture group. Parse with 'fromJson(inputs.extra-args).argName' in workflow." required: false default: '{}' - push: - branches: - - 'renovate/main-**' - # Run every 8 hours + # Run every 12 hours schedule: - - cron: '0 0/8 * * *' + - cron: '0 0/12 * * *' # By specifying the access of one of the scopes, all of those that are not # specified are set to 'none'. @@ -44,7 +66,10 @@ concurrency: # - Event type # - A unique identifier depending on event type: # - schedule: SHA - # - workflow_dispatch: PR number + # - workflow_dispatch: PR number. Because in workflow_call the parent context + # is used, we need to add a prefix name ('aks') for the concurrency group + # so that workflows that are executed in parallel don't conflict with each + # other. # # This structure ensures a unique concurrency group name is generated for each # type of testing, such that re-runs will cancel the previous run. 
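The AKS job further down in this file reads an optional `ipsec` flag out of the `extra-args` JSON with jq and then gates its IPsec-specific steps on the result. A standalone sketch of that parsing pattern, assuming the payload that the new conformance-ipsec.yaml orchestrator passes (the shell variable name here is illustrative, not taken from the workflow):

```bash
#!/usr/bin/env bash
# Standalone sketch of the extra-args parsing used further down in this workflow.
# EXTRA_ARGS stands in for the '${{ inputs.extra-args }}' expression.
set -euo pipefail

EXTRA_ARGS='{"ipsec": true}'

# '// empty' yields an empty string when the key is absent, so runs triggered
# without the flag leave IPSEC unset and the IPsec-only steps are skipped.
IPSEC=$(echo "$EXTRA_ARGS" | jq -r '.["ipsec"] // empty')

if [[ "$IPSEC" == "true" ]]; then
  echo "IPsec requested - the encryption-specific steps would run"
else
  echo "IPsec not requested - the encryption-specific steps would be skipped"
fi
```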
@@ -52,9 +77,8 @@ concurrency: ${{ github.workflow }} ${{ github.event_name }} ${{ - (github.event_name == 'push' && github.sha) || - (github.event_name == 'schedule' && github.sha) || - (github.event_name == 'workflow_dispatch' && github.event.inputs.PR-number) + (github.event_name == 'schedule' && format('{0}-{1}', github.sha, 'aks')) || + (github.event_name == 'workflow_dispatch' && format('{0}-{1}', github.event.inputs.PR-number, 'aks')) }} cancel-in-progress: true @@ -220,14 +244,20 @@ jobs: OWNER="${OWNER//[.\/]/-}" fi + # Check if ipsec is requested via extra-args + IPSEC=$(echo '${{ inputs.extra-args }}' | jq -r '.["ipsec"] // empty') + if [[ "$IPSEC" == "true" ]]; then + echo "::notice::IPsec enabled - setting ipsec=true for all matrix entries" + fi + # We explicity set the cluster-pool Pod CIDR here to not clash with the AKS default Service CIDRs # which are 10.0.0.0/16 and fd12:3456:789a:1::/108 CILIUM_INSTALL_DEFAULTS="${{ steps.default_vars.outputs.cilium_install_defaults }} \ --datapath-mode=aks-byocni \ - --helm-set cluster.name=${{ env.name }} \ - --helm-set loadBalancer.l7.backend=envoy \ + --helm-set=cluster.name=${{ env.name }} \ + --helm-set=loadBalancer.l7.backend=envoy \ --helm-set=azure.resourceGroup=${{ env.name }} \ - --helm-set kubeProxyReplacement=true \ + --helm-set=kubeProxyReplacement=true \ --helm-set=bpf.masquerade=true \ --helm-set=ipv4.enabled=true \ --helm-set=ipv6.enabled=true \ @@ -240,6 +270,7 @@ jobs: echo connectivity_test_defaults=${CONNECTIVITY_TEST_DEFAULTS} >> $GITHUB_OUTPUT echo sha=${{ steps.default_vars.outputs.sha }} >> $GITHUB_OUTPUT echo owner=${OWNER} >> $GITHUB_OUTPUT + echo ipsec=${IPSEC} >> $GITHUB_OUTPUT - name: Login to Azure uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0 @@ -327,28 +358,34 @@ jobs: json-filename: "${{ env.job_name }} (${{ join(matrix.*, ', ') }}) - 1" - name: Clean up Cilium + if: ${{ steps.vars.outputs.ipsec == 'true' }} run: | cilium uninstall --wait - name: Create custom IPsec secret + if: ${{ steps.vars.outputs.ipsec == 'true' }} run: | cilium encrypt create-key --auth-algo rfc4106-gcm-aes - name: Install Cilium with encryption + if: ${{ steps.vars.outputs.ipsec == 'true' }} run: | cilium install ${{ steps.vars.outputs.cilium_install_defaults }} \ --helm-set encryption.enabled=true \ --helm-set encryption.type=ipsec - name: Enable Relay + if: ${{ steps.vars.outputs.ipsec == 'true' }} run: | cilium hubble enable - name: Wait for Cilium status to be ready + if: ${{ steps.vars.outputs.ipsec == 'true' }} run: | cilium status --wait --interactive=false --wait-duration=10m - name: Run sequential connectivity test with IPSec (${{ join(matrix.*, ', ') }}) + if: ${{ steps.vars.outputs.ipsec == 'true' }} run: | cilium connectivity test ${{ steps.vars.outputs.connectivity_test_defaults }} --force-deploy \ --test "seq-.*" \ @@ -356,6 +393,7 @@ jobs: --junit-property github_job_step="Run connectivity test with IPSec (${{ join(matrix.*, ', ') }})" - name: Run concurrent connectivity test with IPSec (${{ join(matrix.*, ', ') }}) + if: ${{ steps.vars.outputs.ipsec == 'true' }} run: | cilium connectivity test ${{ steps.vars.outputs.connectivity_test_defaults }} --force-deploy \ --test-concurrency=${{ env.test_concurrency }} \ @@ -378,7 +416,7 @@ jobs: merge-upload-and-status: name: Merge Upload and Status - if: ${{ always() }} + if: ${{ always() && !inputs.is-workflow-call }} needs: installation-and-connectivity uses: ./.github/workflows/common-post-jobs.yaml secrets: inherit diff --git 
a/.github/workflows/conformance-aws-cni.yaml b/.github/workflows/conformance-aws-cni.yaml index 2a54806ef511c..32942f7979428 100644 --- a/.github/workflows/conformance-aws-cni.yaml +++ b/.github/workflows/conformance-aws-cni.yaml @@ -17,9 +17,6 @@ on: description: "[JSON object] Arbitrary arguments passed from the trigger comment via regex capture group. Parse with 'fromJson(inputs.extra-args).argName' in workflow." required: false default: '{}' - push: - branches: - - 'renovate/main-**' # Run every 8 hours schedule: - cron: '30 0/8 * * *' @@ -52,7 +49,6 @@ concurrency: ${{ github.workflow }} ${{ github.event_name }} ${{ - (github.event_name == 'push' && github.sha) || (github.event_name == 'schedule' && github.sha) || (github.event_name == 'workflow_dispatch' && github.event.inputs.PR-number) }} @@ -63,7 +59,7 @@ env: # renovate: datasource=github-releases depName=eksctl-io/eksctl eksctl_version: v0.214.0 # renovate: datasource=github-releases depName=kubernetes/kubernetes - kubectl_version: v1.34.0 + kubectl_version: v1.34.1 jobs: echo-inputs: diff --git a/.github/workflows/conformance-clustermesh.yaml b/.github/workflows/conformance-clustermesh.yaml index e2c859a7d7e96..73b9afb64b4fc 100644 --- a/.github/workflows/conformance-clustermesh.yaml +++ b/.github/workflows/conformance-clustermesh.yaml @@ -22,7 +22,6 @@ on: branches: - main - ft/main/** - - 'renovate/main-**' paths-ignore: - 'Documentation/**' @@ -566,13 +565,12 @@ jobs: env: KVSTORE_ID: 1 run: | - # Explicitly configure the NodePort to make sure that it is different in - each cluster, to workaround #24692 + # Let the NodePort be selected randomly, to prevent the risk of conflicts. cilium --context ${{ env.contextName1 }} install \ ${{ steps.vars.outputs.cilium_install_defaults }} \ --helm-set cluster.name=${{ env.ciliumClusterName1 }} \ --helm-set cluster.id=1 \ - --helm-set clustermesh.apiserver.service.nodePort=32379 \ + --helm-set clustermesh.apiserver.service.nodePort=0 \ --helm-set clustermesh.apiserver.tls.authMode=${{ matrix.cm-auth-mode-1 }} \ ${{ steps.vars.outputs.cilium_install_multipool_ipam_cluster1 }} \ ${{ steps.kvstore.outputs.cilium_install_kvstore }} \ @@ -589,13 +587,12 @@ env: KVSTORE_ID: 2 run: | - # Explicitly configure the NodePort to make sure that it is different in - each cluster, to workaround #24692 + # Let the NodePort be selected randomly, to prevent the risk of conflicts. 
cilium --context ${{ env.contextName2 }} install \ ${{ steps.vars.outputs.cilium_install_defaults }} \ --helm-set cluster.name=${{ env.ciliumClusterName2 }} \ --helm-set cluster.id=${{ matrix.maxConnectedClusters }} \ - --helm-set clustermesh.apiserver.service.nodePort=32380 \ + --helm-set clustermesh.apiserver.service.nodePort=0 \ --helm-set clustermesh.apiserver.tls.authMode=${{ matrix.cm-auth-mode-2 }} \ ${{ steps.vars.outputs.cilium_install_multipool_ipam_cluster2 }} \ ${{ steps.kvstore.outputs.cilium_install_kvstore }} \ diff --git a/.github/workflows/conformance-delegated-ipam.yaml b/.github/workflows/conformance-delegated-ipam.yaml index 895377e5fe7dd..57402dfeb0db3 100644 --- a/.github/workflows/conformance-delegated-ipam.yaml +++ b/.github/workflows/conformance-delegated-ipam.yaml @@ -22,7 +22,6 @@ on: branches: - main - ft/main/** - - 'renovate/main-**' paths-ignore: - 'Documentation/**' diff --git a/.github/workflows/conformance-eks.yaml b/.github/workflows/conformance-eks.yaml index 1d308c2b5ef1e..5d2105e6ddf4f 100644 --- a/.github/workflows/conformance-eks.yaml +++ b/.github/workflows/conformance-eks.yaml @@ -2,6 +2,34 @@ name: Conformance EKS (ci-eks) # Any change in triggers needs to be reflected in the concurrency group. on: + workflow_call: + inputs: + PR-number: + description: "Pull request number." + required: true + type: string + context-ref: + description: "Context in which the workflow runs. If PR is from a fork, will be the PR target branch (general case). If PR is NOT from a fork, will be the PR branch itself (this allows committers to test changes to workflows directly from PRs)." + required: true + type: string + SHA: + description: "SHA under test (head of the PR branch)." + required: true + type: string + extra-args: + description: "[JSON object] Arbitrary arguments passed from the trigger comment via regex capture group. Parse with 'fromJson(inputs.extra-args).argName' in workflow." + required: false + default: '{}' + type: string + is-workflow-call: + description: "Distinguish if it's a workflow_call event." + required: false + type: boolean + default: true + secrets: + AWS_PR_ASSUME_ROLE: + required: true + workflow_dispatch: inputs: PR-number: @@ -17,12 +45,9 @@ on: description: "[JSON object] Arbitrary arguments passed from the trigger comment via regex capture group. Parse with 'fromJson(inputs.extra-args).argName' in workflow." required: false default: '{}' - push: - branches: - - 'renovate/main-**' - # Run every 8 hours + # Run every 12 hours schedule: - - cron: '0 1/8 * * *' + - cron: '0 1/12 * * *' # By specifying the access of one of the scopes, all of those that are not # specified are set to 'none'. @@ -44,7 +69,10 @@ concurrency: # - Event type # - A unique identifier depending on event type: # - schedule: SHA - # - workflow_dispatch: PR number + # - workflow_dispatch: PR number. Because in workflow_call the parent context + # is used, we need to add a prefix name ('eks') for the concurrency group + # so that workflows that are executed in parallel don't conflict with each + # other. # # This structure ensures a unique concurrency group name is generated for each # type of testing, such that re-runs will cancel the previous run. 
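In the EKS hunks that follow, the same flag (parsed with `// false` rather than `// empty`) is folded into the Kubernetes-version matrix so that every entry gains `ipsec: true`. A sketch of that jq transformation on a made-up two-entry matrix:

```bash
#!/usr/bin/env bash
# Sketch of the matrix rewrite performed by conformance-eks.yaml when IPsec is
# requested. The matrix content below is made up for illustration.
set -euo pipefail

cat > /tmp/result.json <<'EOF'
{"include": [{"version": "1.32", "kpr": true}, {"version": "1.33"}]}
EOF

IPSEC=true
if [[ "$IPSEC" == "true" ]]; then
  # Same jq filter as in the workflow: every include entry gains "ipsec": true.
  jq '.include |= map(. + {"ipsec": true})' /tmp/result.json > /tmp/result.json.tmp
  mv /tmp/result.json.tmp /tmp/result.json
fi

jq -c '.include[]' /tmp/result.json
# {"version":"1.32","kpr":true,"ipsec":true}
# {"version":"1.33","ipsec":true}
```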
@@ -52,9 +80,8 @@ concurrency: ${{ github.workflow }} ${{ github.event_name }} ${{ - (github.event_name == 'push' && github.sha) || - (github.event_name == 'schedule' && github.sha) || - (github.event_name == 'workflow_dispatch' && github.event.inputs.PR-number) + (github.event_name == 'schedule' && format('{0}-{1}', github.sha, 'eks')) || + (github.event_name == 'workflow_dispatch' && format('{0}-{1}', github.event.inputs.PR-number, 'eks')) }} cancel-in-progress: true @@ -64,7 +91,7 @@ env: # renovate: datasource=github-releases depName=eksctl-io/eksctl eksctl_version: v0.214.0 # renovate: datasource=github-releases depName=kubernetes/kubernetes - kubectl_version: v1.34.0 + kubectl_version: v1.34.1 jobs: echo-inputs: @@ -150,6 +177,15 @@ jobs: id: set-matrix run: | cp /tmp/matrix.json /tmp/result.json + + # Check if ipsec is requested via extra-args + IPSEC=$(echo '${{ inputs.extra-args }}' | jq -r '.["ipsec"] // false') + if [[ "$IPSEC" == "true" ]]; then + echo "::notice::IPsec enabled - setting ipsec=true for all matrix entries" + jq '.include |= map(. + {"ipsec": true})' /tmp/result.json > /tmp/result.json.tmp + mv /tmp/result.json.tmp /tmp/result.json + fi + jq -c '.include[]' /tmp/matrix.json | while read i; do VERSION=$(echo $i | jq -r '.version') aws eks describe-cluster-versions | jq -r '.clusterVersions[].clusterVersion' > /tmp/output @@ -223,7 +259,7 @@ jobs: CILIUM_INSTALL_DEFAULTS="${{ steps.default_vars.outputs.cilium_install_defaults }} \ --helm-set=cluster.name=${{ env.clusterName }} \ --helm-set=hubble.relay.enabled=true \ - --helm-set loadBalancer.l7.backend=envoy \ + --helm-set=loadBalancer.l7.backend=envoy \ --wait=false" if [[ "${{ matrix.ipsec }}" == "true" ]]; then CILIUM_INSTALL_DEFAULTS+=" --helm-set encryption.enabled=true --helm-set encryption.type=ipsec" @@ -381,7 +417,7 @@ jobs: merge-upload-and-status: name: Merge Upload and Status - if: ${{ always() }} + if: ${{ always() && !inputs.is-workflow-call }} needs: installation-and-connectivity uses: ./.github/workflows/common-post-jobs.yaml secrets: inherit diff --git a/.github/workflows/conformance-gateway-api.yaml b/.github/workflows/conformance-gateway-api.yaml index 2d64fa3a35349..752ba33ae0f81 100644 --- a/.github/workflows/conformance-gateway-api.yaml +++ b/.github/workflows/conformance-gateway-api.yaml @@ -22,7 +22,6 @@ on: branches: - main - ft/main/** - - 'renovate/main-**' paths-ignore: - 'Documentation/**' - 'test/**' diff --git a/.github/workflows/conformance-ginkgo.yaml b/.github/workflows/conformance-ginkgo.yaml index 46e0ff3ca93a2..2c589f17e76ad 100644 --- a/.github/workflows/conformance-ginkgo.yaml +++ b/.github/workflows/conformance-ginkgo.yaml @@ -17,9 +17,6 @@ on: description: "[JSON object] Arbitrary arguments passed from the trigger comment via regex capture group. Parse with 'fromJson(inputs.extra-args).argName' in workflow." 
required: false default: '{}' - push: - branches: - - 'renovate/main-**' # Run every 8 hours schedule: - cron: '0 1/8 * * *' @@ -50,7 +47,6 @@ concurrency: ${{ github.workflow }} ${{ github.event_name }} ${{ - (github.event_name == 'push' && github.sha) || (github.event_name == 'schedule' && github.sha) || (github.event_name == 'workflow_dispatch' && github.event.inputs.PR-number) }} diff --git a/.github/workflows/conformance-gke.yaml b/.github/workflows/conformance-gke.yaml index 7b1e0193aa80e..b6c4023a709ae 100644 --- a/.github/workflows/conformance-gke.yaml +++ b/.github/workflows/conformance-gke.yaml @@ -2,6 +2,31 @@ name: Conformance GKE (ci-gke) # Any change in triggers needs to be reflected in the concurrency group. on: + workflow_call: + inputs: + PR-number: + description: "Pull request number." + required: false + type: string + context-ref: + description: "Context in which the workflow runs. If PR is from a fork, will be the PR target branch (general case). If PR is NOT from a fork, will be the PR branch itself (this allows committers to test changes to workflows directly from PRs)." + required: false + type: string + SHA: + description: "SHA under test (head of the PR branch)." + required: false + type: string + extra-args: + description: "[JSON object] Arbitrary arguments passed from the trigger comment via regex capture group. Parse with 'fromJson(inputs.extra-args).argName' in workflow." + required: false + type: string + default: '{}' + is-workflow-call: + description: "Distinguish if it's a workflow_call event." + required: false + type: boolean + default: true + workflow_dispatch: inputs: PR-number: @@ -17,12 +42,9 @@ on: description: "[JSON object] Arbitrary arguments passed from the trigger comment via regex capture group. Parse with 'fromJson(inputs.extra-args).argName' in workflow." required: false default: '{}' - push: - branches: - - 'renovate/main-**' - # Run every 8 hours + # Run every 12 hours schedule: - - cron: '0 2/8 * * *' + - cron: '0 3/12 * * *' # By specifying the access of one of the scopes, all of those that are not # specified are set to 'none'. @@ -44,7 +66,10 @@ concurrency: # - Event type # - A unique identifier depending on event type: # - schedule: SHA - # - workflow_dispatch: PR number + # - workflow_dispatch: PR number. Because in workflow_call the parent context + # is used, we need to add a prefix name ('gke') for the concurrency group + # so that workflows that are executed in parallel don't conflict with each + # other. # # This structure ensures a unique concurrency group name is generated for each # type of testing, such that re-runs will cancel the previous run. 
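The GKE hunks that follow go one step further: when IPsec is requested, each matrix config is rewritten so its type gains a `-ipsec` suffix (unless it already mentions ipsec) and the encryption Helm flags are appended to `cilium-install-opts`. Roughly, applied to sample data (the first entry mirrors the "tunnel" config from test-config-helm.yaml; the second is a hypothetical entry without install opts, added only to exercise the else branch):

```bash
#!/usr/bin/env bash
# Sketch of the per-config rewrite from conformance-gke.yaml, on sample data.
set -euo pipefail

cat > /tmp/matrix.json <<'EOF'
{"config": [
  {"type": "tunnel", "index": 2, "cilium-install-opts": "--datapath-mode=tunnel"},
  {"type": "no-tunnel", "index": 1}
]}
EOF

jq '.config |= map(
  .type = (if (.type | test("ipsec")) then .type else .type + "-ipsec" end) |
  if .["cilium-install-opts"] then
    .["cilium-install-opts"] += " --helm-set=encryption.enabled=true --helm-set=encryption.type=ipsec"
  else
    .["cilium-install-opts"] = "--helm-set=encryption.enabled=true --helm-set=encryption.type=ipsec"
  end
)' /tmp/matrix.json
# "tunnel" becomes "tunnel-ipsec", and both entries end up carrying the
# encryption Helm flags in cilium-install-opts.
```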
@@ -52,9 +77,8 @@ concurrency: ${{ github.workflow }} ${{ github.event_name }} ${{ - (github.event_name == 'push' && github.sha) || - (github.event_name == 'schedule' && github.sha) || - (github.event_name == 'workflow_dispatch' && github.event.inputs.PR-number) + (github.event_name == 'schedule' && format('{0}-{1}', github.sha, 'gke')) || + (github.event_name == 'workflow_dispatch' && format('{0}-{1}', github.event.inputs.PR-number, 'gke')) }} cancel-in-progress: true @@ -62,7 +86,7 @@ env: clusterName: ${{ github.repository_owner }}-${{ github.event.repository.name }}-${{ github.run_id }}-${{ github.run_attempt }} USE_GKE_GCLOUD_AUTH_PLUGIN: True # renovate: datasource=docker depName=google/cloud-sdk - gcloud_version: 537.0.0 + gcloud_version: 539.0.0 jobs: echo-inputs: @@ -120,6 +144,8 @@ jobs: # main -> event_name = schedule # other stable branches -> PR-number starting with v (e.g. v1.14) VERSIONS=$(echo ${{ inputs.extra-args }} | awk -F'=' '{print $2}') + IPSEC=$(echo '${{ inputs.extra-args }}' | jq -r '.["ipsec"] // empty') + # shellcheck disable=SC2193 if [[ "${{ github.event_name }}" == "schedule" || "${{ inputs.PR-number }}" == v* || "$VERSIONS" == "all" ]];then cp gke.json /tmp/matrix.json @@ -127,6 +153,23 @@ jobs: jq '{ "k8s": [ .k8s[] | select(.default) ], "config": .config}' gke.json > /tmp/matrix.json fi + # Add IPsec options to all configurations if ipsec is true + if [[ "$IPSEC" == "true" ]]; then + echo "::notice::IPsec enabled - setting ipsec=true for all matrix entries" + echo "Adding IPsec encryption options to all configurations" + jq '.config |= map( + # Add -ipsec suffix to type unless it already contains ipsec + .type = (if (.type | test("ipsec")) then .type else .type + "-ipsec" end) | + # Add IPsec helm options to cilium-install-opts + if .["cilium-install-opts"] then + .["cilium-install-opts"] += " --helm-set=encryption.enabled=true --helm-set=encryption.type=ipsec" + else + .["cilium-install-opts"] = "--helm-set=encryption.enabled=true --helm-set=encryption.type=ipsec" + end + )' /tmp/matrix.json > /tmp/matrix.json.tmp + mv /tmp/matrix.json.tmp /tmp/matrix.json + fi + echo "Generated matrix:" cat /tmp/matrix.json @@ -254,7 +297,7 @@ jobs: --helm-set=cluster.name=${{ env.clusterName }}-${{ matrix.config.index }} \ --helm-set=hubble.relay.enabled=true \ --helm-set=agentNotReadyTaintKey=ignore-taint.cluster-autoscaler.kubernetes.io/cilium-agent-not-ready \ - --helm-set loadBalancer.l7.backend=envoy \ + --helm-set=loadBalancer.l7.backend=envoy \ --wait=false" CONNECTIVITY_TEST_DEFAULTS="${{ steps.e2e_config.outputs.test_flags }}" @@ -338,7 +381,7 @@ jobs: install/kubernetes/cilium - name: Create custom IPsec secret - if: ${{ matrix.config.type == 'ipsec' || matrix.config.type == 'tunnel-ipsec' }} + if: ${{ contains(fromJSON('["no-tunnel-ipsec", "tunnel-ipsec", "tunnel-ingress-controller-ipsec"]'), matrix.config.type) }} run: | cilium encrypt create-key --auth-algo rfc4106-gcm-aes @@ -367,7 +410,7 @@ jobs: if: ${{ always() }} uses: ./.github/actions/post-logic with: - artifacts_suffix: "${{ env.job_name }} (${{ join(matrix.k8s.*, ', ') }}, ${{ join(matrix.config.*, ', ') }})" + artifacts_suffix: "${{ env.job_name }} (${{ join(matrix.k8s.*, ', ') }}, ${{ matrix.config.type }})" job_status: "${{ job.status }}" - name: Clean up ESP allow firewall rule @@ -388,7 +431,7 @@ jobs: merge-upload-and-status: name: Merge Upload and Status - if: ${{ always() }} + if: ${{ always() && !inputs.is-workflow-call }} needs: installation-and-connectivity uses: 
./.github/workflows/common-post-jobs.yaml secrets: inherit diff --git a/.github/workflows/conformance-ingress.yaml b/.github/workflows/conformance-ingress.yaml index f684ea3c31d7f..5a3544471fb46 100644 --- a/.github/workflows/conformance-ingress.yaml +++ b/.github/workflows/conformance-ingress.yaml @@ -21,7 +21,6 @@ on: branches: - main - ft/main/** - - 'renovate/main-**' paths-ignore: - 'Documentation/**' - 'test/**' diff --git a/.github/workflows/conformance-ipsec-e2e.yaml b/.github/workflows/conformance-ipsec-e2e.yaml index 3d7ad49292aeb..a81fc69de1e3c 100644 --- a/.github/workflows/conformance-ipsec-e2e.yaml +++ b/.github/workflows/conformance-ipsec-e2e.yaml @@ -17,9 +17,6 @@ on: description: "[JSON object] Arbitrary arguments passed from the trigger comment via regex capture group. Parse with 'fromJson(inputs.extra-args).argName' in workflow." required: false default: '{}' - push: - branches: - - 'renovate/main-**' # Run every 8 hours schedule: - cron: '0 5/8 * * *' @@ -50,7 +47,6 @@ concurrency: ${{ github.workflow }} ${{ github.event_name }} ${{ - (github.event_name == 'push' && github.sha) || (github.event_name == 'schedule' && github.sha) || (github.event_name == 'workflow_dispatch' && github.event.inputs.PR-number) }} diff --git a/.github/workflows/conformance-ipsec.yaml b/.github/workflows/conformance-ipsec.yaml new file mode 100644 index 0000000000000..927fd6ecb1dd8 --- /dev/null +++ b/.github/workflows/conformance-ipsec.yaml @@ -0,0 +1,138 @@ +name: Conformance IPsec (ci-ipsec) + +# Any change in triggers needs to be reflected in the concurrency group. +on: + workflow_dispatch: + inputs: + PR-number: + description: "Pull request number." + required: true + context-ref: + description: "Context in which the workflow runs. If PR is from a fork, will be the PR target branch (general case). If PR is NOT from a fork, will be the PR branch itself (this allows committers to test changes to workflows directly from PRs)." + required: true + SHA: + description: "SHA under test (head of the PR branch)." + required: true + extra-args: + description: "[JSON object] Arbitrary arguments passed from the trigger comment via regex capture group. Parse with 'fromJson(inputs.extra-args).argName' in workflow." + required: false + default: '{}' + # Run every 12 hours + schedule: + - cron: '0 4/12 * * *' + +# By specifying the access of one of the scopes, all of those that are not +# specified are set to 'none'. +permissions: + # To read actions state with catchpoint/workflow-telemetry-action + actions: read + # To be able to access the repository with actions/checkout + contents: read + # To allow retrieving information from the PR API + pull-requests: read + # To be able to set commit status + statuses: write + # To be able to request the JWT from GitHub's OIDC provider + id-token: write + +concurrency: + # Structure: + # - Parent concurrency group name to avoid deadlock with child workflows + # - Workflow name + # - Event type + # - A unique identifier depending on event type: + # - schedule: SHA + # - workflow_dispatch: PR number + # + # This structure ensures a unique concurrency group name is generated for each + # type of testing, such that re-runs will cancel the previous run. 
+ group: | + parent + ${{ github.workflow }} + ${{ github.event_name }} + ${{ + (github.event_name == 'push' && github.sha) || + (github.event_name == 'schedule' && github.sha) || + (github.event_name == 'workflow_dispatch' && github.event.inputs.PR-number) + }} + cancel-in-progress: true + +jobs: + echo-inputs: + if: ${{ github.event_name == 'workflow_dispatch' }} + name: Echo Workflow Dispatch Inputs + runs-on: ubuntu-24.04 + steps: + - name: Echo Workflow Dispatch Inputs + run: | + echo '${{ tojson(inputs) }}' + + commit-status-start: + name: Commit Status Start + runs-on: ubuntu-24.04 + steps: + - name: Set initial commit status + uses: myrotvorets/set-commit-status-action@3730c0a348a2ace3c110851bed53331bc6406e9f # v2.0.1 + with: + sha: ${{ inputs.SHA || github.sha }} + + wait-for-images: + name: Wait for images + runs-on: ubuntu-24.04 + timeout-minutes: 30 + steps: + - name: Checkout context ref (trusted) + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + with: + ref: ${{ inputs.context-ref || github.sha }} + persist-credentials: false + + - name: Wait for images + uses: ./.github/actions/wait-for-images + with: + SHA: ${{ inputs.SHA || github.sha }} + images: cilium-ci operator-generic-ci hubble-relay-ci + + conformance-aks-ipsec: + name: Conformance AKS with IPsec + needs: wait-for-images + uses: ./.github/workflows/conformance-aks.yaml + secrets: inherit + with: + PR-number: ${{ inputs.PR-number || github.ref_name }} + context-ref: ${{ inputs.context-ref || github.sha }} + SHA: ${{ inputs.SHA || github.sha }} + extra-args: '{"ipsec": true}' + + conformance-eks-ipsec: + name: Conformance EKS with IPsec + needs: wait-for-images + uses: ./.github/workflows/conformance-eks.yaml + secrets: inherit + with: + PR-number: ${{ inputs.PR-number || github.ref_name }} + context-ref: ${{ inputs.context-ref || github.sha }} + SHA: ${{ inputs.SHA || github.sha }} + extra-args: '{"ipsec": true}' + + conformance-gke-ipsec: + name: Conformance GKE with IPsec + needs: wait-for-images + uses: ./.github/workflows/conformance-gke.yaml + secrets: inherit + with: + PR-number: ${{ inputs.PR-number || github.ref_name }} + context-ref: ${{ inputs.context-ref || github.sha }} + SHA: ${{ inputs.SHA || github.sha }} + extra-args: '{"ipsec": true}' + + merge-upload-and-status: + name: Merge Upload and Status + if: ${{ always() }} + needs: [conformance-aks-ipsec, conformance-eks-ipsec, conformance-gke-ipsec] + uses: ./.github/workflows/common-post-jobs.yaml + secrets: inherit + with: + context-ref: ${{ inputs.context-ref || github.sha }} + sha: ${{ inputs.SHA || github.sha }} + result: ${{ (needs.conformance-aks-ipsec.result == 'failure' || needs.conformance-eks-ipsec.result == 'failure' || needs.conformance-gke-ipsec.result == 'failure') && 'failure' || 'success' }} diff --git a/.github/workflows/conformance-multi-pool.yaml b/.github/workflows/conformance-multi-pool.yaml index 8d7fddf068a05..82a951da2117c 100644 --- a/.github/workflows/conformance-multi-pool.yaml +++ b/.github/workflows/conformance-multi-pool.yaml @@ -22,7 +22,6 @@ on: branches: - main - ft/main/** - - 'renovate/main-**' paths-ignore: - 'Documentation/**' diff --git a/.github/workflows/conformance-runtime.yaml b/.github/workflows/conformance-runtime.yaml index 7022a327f5ecc..52f3cce10b577 100644 --- a/.github/workflows/conformance-runtime.yaml +++ b/.github/workflows/conformance-runtime.yaml @@ -21,7 +21,6 @@ on: branches: - main - ft/main/** - - 'renovate/main-**' paths-ignore: - 'Documentation/**' # Run every 8 hours 
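One small wrinkle across the three conformance workflows called by the new conformance-ipsec.yaml above: AKS and GKE parse the flag with `jq -r '.["ipsec"] // empty'` while EKS uses `// false`. Both reduce to the same `"true"` check; they only differ in what an absent key produces. A quick illustration (payloads are hypothetical):

```bash
#!/usr/bin/env bash
# What the two jq fallbacks return when the ipsec key is absent vs present.
set -euo pipefail

echo '{}'              | jq -r '.["ipsec"] // empty'   # prints nothing
echo '{}'              | jq -r '.["ipsec"] // false'   # prints "false"
echo '{"ipsec": true}' | jq -r '.["ipsec"] // empty'   # prints "true"
echo '{"ipsec": true}' | jq -r '.["ipsec"] // false'   # prints "true"
```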
diff --git a/.github/workflows/hubble-cli-integration-test.yaml b/.github/workflows/hubble-cli-integration-test.yaml index c21a0c1bcd8d1..c1c94cf421c1b 100644 --- a/.github/workflows/hubble-cli-integration-test.yaml +++ b/.github/workflows/hubble-cli-integration-test.yaml @@ -21,7 +21,6 @@ on: branches: - main - ft/main/** - - 'renovate/main-**' paths-ignore: - 'Documentation/**' diff --git a/.github/workflows/integration-test.yaml b/.github/workflows/integration-test.yaml index 56af2a60f58f0..b0a0fbd8918b3 100644 --- a/.github/workflows/integration-test.yaml +++ b/.github/workflows/integration-test.yaml @@ -21,7 +21,6 @@ on: branches: - main - ft/main/** - - 'renovate/main-**' paths-ignore: - 'Documentation/**' # Run every 8 hours @@ -100,7 +99,7 @@ jobs: if: ${{ startsWith(matrix.arch, 'ubuntu-latest') || startsWith(matrix.arch, 'ubuntu-24.04') }} shell: bash run: | - # This is required for libtinfo5 + # This is required for libtinfo6 uri="http://security.ubuntu.com/ubuntu" arch="amd64" if [ ${{ runner.arch }} == "ARM64" ]; then @@ -120,7 +119,7 @@ jobs: - name: Install Dependencies shell: bash run: | - sudo apt update && sudo apt install -y --no-install-recommends build-essential make libtinfo5 + sudo apt update && sudo apt install -y --no-install-recommends build-essential make libtinfo6 - name: Checkout context ref (trusted) uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 diff --git a/.github/workflows/net-perf-gke.yaml b/.github/workflows/net-perf-gke.yaml index 56b5cc6a16ae5..c974b829fc139 100644 --- a/.github/workflows/net-perf-gke.yaml +++ b/.github/workflows/net-perf-gke.yaml @@ -65,7 +65,7 @@ env: USE_GKE_GCLOUD_AUTH_PLUGIN: True gcp_zone: us-east5-a # renovate: datasource=docker depName=google/cloud-sdk - gcloud_version: 537.0.0 + gcloud_version: 539.0.0 jobs: echo-inputs: diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 330319253ea19..6f2adbba53f63 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -27,6 +27,11 @@ on: description: 'Which version are you releasing? (e.g. 
vX.Y.Z[-(pre|rc).W])' required: true type: string + secrets: + CILIUM_RELEASE_BOT_PEM: + required: true + CILIUM_RELEASE_BOT_APP_ID: + required: true permissions: # To be able to access the repository with `actions/checkout` diff --git a/.github/workflows/renovate-config-validator.yaml b/.github/workflows/renovate-config-validator.yaml index 7bf7fb3f55d05..efb59cde85fbe 100644 --- a/.github/workflows/renovate-config-validator.yaml +++ b/.github/workflows/renovate-config-validator.yaml @@ -18,7 +18,7 @@ jobs: - name: Validate configuration run: | # renovate: datasource=docker - export RENOVATE_IMAGE=ghcr.io/renovatebot/renovate:41.97.7@sha256:e9016393f1deb97b58bd3d79606dbc166d9a308f24632af7a4f5af5b6f4640f2 + export RENOVATE_IMAGE=ghcr.io/renovatebot/renovate:41.122.3@sha256:665602e84a6f9f2f6d062cf3dfa1c3c4720846ec7a87791f96dfd5f25c73435f docker run --rm --entrypoint "renovate-config-validator" \ -v "${{ github.workspace }}/.github/renovate.json5":"/renovate.json5" \ ${RENOVATE_IMAGE} "/renovate.json5" diff --git a/.github/workflows/renovate.yaml b/.github/workflows/renovate.yaml index a9654edd641d5..1883fbe72f312 100644 --- a/.github/workflows/renovate.yaml +++ b/.github/workflows/renovate.yaml @@ -34,13 +34,13 @@ jobs: uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Self-hosted Renovate - uses: renovatebot/github-action@7876d7a812254599d262d62b6b2c2706018258a2 # v43.0.10 + uses: renovatebot/github-action@f8af9272cd94a4637c29f60dea8731afd3134473 # v43.0.12 env: # default to DEBUG log level, this is always useful LOG_LEVEL: ${{ github.event.inputs.renovate_log_level_debug == 'false' && 'INFO' || 'DEBUG' }} with: # renovate: datasource=github-releases depName=renovatebot/renovate - renovate-version: 41.97.7 + renovate-version: 41.122.3 docker-user: root docker-cmd-file: .github/actions/renovate/entrypoint.sh configurationFile: .github/renovate.json5 diff --git a/.github/workflows/scale-cleanup-kops.yaml b/.github/workflows/scale-cleanup-kops.yaml index 871a4dcaf860f..8ba303af3f73c 100644 --- a/.github/workflows/scale-cleanup-kops.yaml +++ b/.github/workflows/scale-cleanup-kops.yaml @@ -26,7 +26,7 @@ env: # renovate: datasource=golang-version depName=go go_version: 1.25.1 # renovate: datasource=docker depName=google/cloud-sdk - gcloud_version: 537.0.0 + gcloud_version: 539.0.0 jobs: cleanup-kops-clusters: diff --git a/.github/workflows/scale-test-100-gce.yaml b/.github/workflows/scale-test-100-gce.yaml index b1cf7a2377cd5..a4bed53ba662b 100644 --- a/.github/workflows/scale-test-100-gce.yaml +++ b/.github/workflows/scale-test-100-gce.yaml @@ -62,7 +62,7 @@ env: test_name: scale-100 cluster_name: ${{ github.run_id }}-${{ github.run_attempt }} # renovate: datasource=docker depName=google/cloud-sdk - gcloud_version: 537.0.0 + gcloud_version: 539.0.0 jobs: echo-inputs: diff --git a/.github/workflows/scale-test-5-gce.yaml b/.github/workflows/scale-test-5-gce.yaml index a8900e012d7b7..a9f8f5224830d 100644 --- a/.github/workflows/scale-test-5-gce.yaml +++ b/.github/workflows/scale-test-5-gce.yaml @@ -64,7 +64,7 @@ env: test_name: scale-5 cluster_name: ${{ github.run_id }}-${{ github.run_attempt }} # renovate: datasource=docker depName=google/cloud-sdk - gcloud_version: 537.0.0 + gcloud_version: 539.0.0 jobs: echo-inputs: diff --git a/.github/workflows/scale-test-clustermesh.yaml b/.github/workflows/scale-test-clustermesh.yaml index 71b2453f9e405..2b046e9eb9e0e 100644 --- a/.github/workflows/scale-test-clustermesh.yaml +++ b/.github/workflows/scale-test-clustermesh.yaml 
@@ -60,9 +60,9 @@ env: # renovate: datasource=golang-version depName=go go_version: 1.25.1 # renovate: datasource=docker depName=google/cloud-sdk - gcloud_version: 537.0.0 + gcloud_version: 539.0.0 # renovate: datasource=git-refs depName=https://github.com/cilium/scaffolding branch=main - cmapisrv_mock_ref: 38f6ed0ddf54dc548187536c385d2d722e265dad + cmapisrv_mock_ref: 6bc0c7cb2ea7cd7d9aa071f691e218669aaa1c40 test_name: scale-clustermesh cluster_name: ${{ github.run_id }}-${{ github.run_attempt }} diff --git a/.github/workflows/scale-test-egw.yaml b/.github/workflows/scale-test-egw.yaml index c77bc3e9c3bd8..8686f1b02ebc2 100644 --- a/.github/workflows/scale-test-egw.yaml +++ b/.github/workflows/scale-test-egw.yaml @@ -72,14 +72,14 @@ env: # renovate: datasource=github-releases depName=eksctl-io/eksctl eksctl_version: v0.214.0 # renovate: datasource=github-releases depName=kubernetes/kubernetes - kubectl_version: v1.34.0 + kubectl_version: v1.34.1 # renovate: datasource=docker depName=google/cloud-sdk - gcloud_version: 537.0.0 + gcloud_version: 539.0.0 # Hosted under quay.io/cilium/egw-scale-utils and built by # a workflow in cilium/scaffolding. # renovate: datasource=git-refs depName=https://github.com/cilium/scaffolding branch=main - egw_utils_ref: 38f6ed0ddf54dc548187536c385d2d722e265dad + egw_utils_ref: 6bc0c7cb2ea7cd7d9aa071f691e218669aaa1c40 test_name: egw cluster_name: ${{ github.run_id }}-${{ github.run_attempt }} diff --git a/.github/workflows/scale-test-node-throughput-gce.yaml b/.github/workflows/scale-test-node-throughput-gce.yaml index 3dd9fb779f684..6e86366080818 100644 --- a/.github/workflows/scale-test-node-throughput-gce.yaml +++ b/.github/workflows/scale-test-node-throughput-gce.yaml @@ -43,7 +43,7 @@ env: cluster_name: ${{ github.run_id }}-${{ github.run_attempt }} GCP_PERF_RESULTS_BUCKET: gs://cilium-scale-results # renovate: datasource=docker depName=google/cloud-sdk - gcloud_version: 537.0.0 + gcloud_version: 539.0.0 jobs: install-and-scaletest: diff --git a/.github/workflows/tests-clustermesh-upgrade.yaml b/.github/workflows/tests-clustermesh-upgrade.yaml index 8e648f5dc16a0..ec0972688153d 100644 --- a/.github/workflows/tests-clustermesh-upgrade.yaml +++ b/.github/workflows/tests-clustermesh-upgrade.yaml @@ -22,7 +22,6 @@ on: branches: - main - ft/main/** - - 'renovate/main-**' paths-ignore: - 'Documentation/**' diff --git a/.github/workflows/tests-datapath-verifier.yaml b/.github/workflows/tests-datapath-verifier.yaml index 2178cd3c46788..34a7ac70f5283 100644 --- a/.github/workflows/tests-datapath-verifier.yaml +++ b/.github/workflows/tests-datapath-verifier.yaml @@ -17,9 +17,6 @@ on: description: "[JSON object] Arbitrary arguments passed from the trigger comment via regex capture group. Parse with 'fromJson(inputs.extra-args).argName' in workflow." 
required: false default: '{}' - push: - branches: - - 'renovate/main-**' # Run every 8 hours schedule: - cron: '0 5/8 * * *' @@ -50,7 +47,6 @@ concurrency: ${{ github.workflow }} ${{ github.event_name }} ${{ - (github.event_name == 'push' && github.sha) || (github.event_name == 'schedule' && github.sha) || (github.event_name == 'workflow_dispatch' && github.event.inputs.PR-number) }} @@ -90,22 +86,22 @@ jobs: - kernel: 'rhel8.6-20250812.093650' ci-kernel: '510' # renovate: datasource=docker depName=quay.io/lvh-images/complexity-test - - kernel: '5.10-20250812.093650' + - kernel: '5.10-20250917.183741' ci-kernel: '510' # renovate: datasource=docker depName=quay.io/lvh-images/complexity-test - - kernel: '5.15-20250812.093650' + - kernel: '5.15-20250917.183741' ci-kernel: '510' # renovate: datasource=docker depName=quay.io/lvh-images/complexity-test - - kernel: '6.1-20250812.093650' + - kernel: '6.1-20250917.183741' ci-kernel: '61' # renovate: datasource=docker depName=quay.io/lvh-images/complexity-test - - kernel: '6.6-20250812.093650' + - kernel: '6.6-20250917.183741' ci-kernel: '61' # renovate: datasource=docker depName=quay.io/lvh-images/complexity-test - - kernel: '6.12-20250812.093650' + - kernel: '6.12-20250917.183741' ci-kernel: '61' # renovate: datasource=docker depName=quay.io/lvh-images/complexity-test - - kernel: 'bpf-next-20250812.093650' + - kernel: 'bpf-next-20250917.183741' ci-kernel: 'netnext' timeout-minutes: 60 steps: diff --git a/.github/workflows/tests-e2e-upgrade.yaml b/.github/workflows/tests-e2e-upgrade.yaml index 0ac04911c3e05..012140ec33736 100644 --- a/.github/workflows/tests-e2e-upgrade.yaml +++ b/.github/workflows/tests-e2e-upgrade.yaml @@ -17,9 +17,6 @@ on: description: "[JSON object] Arbitrary arguments passed from the trigger comment via regex capture group. Parse with 'fromJson(inputs.extra-args).argName' in workflow." required: false default: '{}' - push: - branches: - - 'renovate/main-**' # Run every 8 hours schedule: - cron: '0 5/8 * * *' @@ -50,7 +47,6 @@ concurrency: ${{ github.workflow }} ${{ github.event_name }} ${{ - (github.event_name == 'push' && github.sha) || (github.event_name == 'schedule' && github.sha) || (github.event_name == 'workflow_dispatch' && github.event.inputs.PR-number) }} diff --git a/.github/workflows/tests-ipsec-upgrade.yaml b/.github/workflows/tests-ipsec-upgrade.yaml index 8433fb1255ef0..164588723d508 100644 --- a/.github/workflows/tests-ipsec-upgrade.yaml +++ b/.github/workflows/tests-ipsec-upgrade.yaml @@ -17,9 +17,6 @@ on: description: "[JSON object] Arbitrary arguments passed from the trigger comment via regex capture group. Parse with 'fromJson(inputs.extra-args).argName' in workflow." required: false default: '{}' - push: - branches: - - 'renovate/main-**' # By specifying the access of one of the scopes, all of those that are not # specified are set to 'none'. 
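For clarity, once the `push` trigger and its clause are dropped from these test workflows, the concurrency key reduces to the schedule and workflow_dispatch cases. A sketch of the resulting block is shown below; surrounding keys such as `cancel-in-progress` are not visible in the hunk and are omitted here.

```yaml
concurrency:
  group: |
    ${{ github.workflow }}
    ${{ github.event_name }}
    ${{
      (github.event_name == 'schedule' && github.sha) ||
      (github.event_name == 'workflow_dispatch' && github.event.inputs.PR-number)
    }}
```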
@@ -47,7 +44,6 @@ concurrency: ${{ github.workflow }} ${{ github.event_name }} ${{ - (github.event_name == 'push' && github.sha) || (github.event_name == 'schedule' && github.sha) || (github.event_name == 'workflow_dispatch' && github.event.inputs.PR-number) }} diff --git a/.github/workflows/wait-for-status-check.yaml b/.github/workflows/wait-for-status-check.yaml new file mode 100644 index 0000000000000..6be1054f5107f --- /dev/null +++ b/.github/workflows/wait-for-status-check.yaml @@ -0,0 +1,122 @@ +name: Wait for Lint Checks + +on: + workflow_call: + inputs: + if: + description: "Condition to run the wait (true/false)" + type: boolean + default: true + sha: + description: "SHA to check for lint workflow completion" + required: true + type: string + lint-workflows: + description: "Comma-separated list of lint workflow filenames to wait for" + required: false + type: string + default: "lint-images-base.yaml" + timeout-minutes: + description: "Maximum time to wait for lint workflows (in minutes)" + required: false + type: number + default: 30 + poll-interval: + description: "Polling interval in seconds" + required: false + type: number + default: 30 + +permissions: + actions: read + contents: read + +jobs: + wait-for-lint: + name: Wait for lint checks + runs-on: ubuntu-24.04 + timeout-minutes: ${{ inputs.timeout-minutes }} + steps: + - name: Wait for lint workflows to complete + if: ${{ inputs.if }} + env: + GH_TOKEN: ${{ github.token }} + run: | + # Parse input parameters + check_sha="${{ inputs.sha }}" + lint_workflows_str="${{ inputs.lint-workflows }}" + poll_interval="${{ inputs.poll-interval }}" + timeout_minutes="${{ inputs.timeout-minutes }}" + + echo "Waiting for lint workflows to complete for SHA: $check_sha" + echo "Lint workflows to check: $lint_workflows_str" + echo "Poll interval: ${poll_interval}s, Timeout: ${timeout_minutes}m" + + # Convert comma-separated workflows to array + IFS=',' read -ra lint_workflows <<< "$lint_workflows_str" + + max_attempts=$((timeout_minutes * 60 / poll_interval)) + attempt=0 + + while [[ $attempt -lt $max_attempts ]]; do + attempt=$((attempt + 1)) + echo "Attempt $attempt/$max_attempts" + + all_successful=true + + for workflow in "${lint_workflows[@]}"; do + # Trim whitespace + workflow=$(echo "$workflow" | xargs) + echo "Checking workflow: $workflow" + + # Use gh CLI to get the latest run for this workflow and commit + run_status=$(gh --repo ${{ github.repository }} run list --workflow "$workflow" --commit "$check_sha" --limit 1 --json status,conclusion --jq '.[0] // empty') + + if [[ -z "$run_status" ]]; then + echo "No workflow run found for SHA $check_sha" + all_successful=false + continue + fi + + status=$(echo "$run_status" | jq -r '.status') + conclusion=$(echo "$run_status" | jq -r '.conclusion') + + echo "Workflow status: $status, conclusion: $conclusion" + + case "$status" in + "completed") + if [[ "$conclusion" == "success" ]]; then + echo "✅ Workflow completed successfully" + else + echo "❌ Lint workflow $workflow failed with conclusion: $conclusion" + exit 1 + fi + ;; + "in_progress"|"queued") + echo "⏳ Workflow is still running..." + all_successful=false + ;; + *) + echo "❓ Unknown workflow status: $status" + all_successful=false + ;; + esac + done + + if [[ "$all_successful" == "true" ]]; then + echo "✅ All lint workflows completed successfully!" + exit 0 + fi + + if [[ $attempt -lt $max_attempts ]]; then + echo "Waiting ${poll_interval} seconds before next check..." 
+ sleep "$poll_interval" + fi + done + + echo "❌ Timeout waiting for lint workflows to complete" + exit 1 + + - name: Return successful + if: ${{ !inputs.if }} + run: echo "Skipping wait as per input condition" diff --git a/CODEOWNERS b/CODEOWNERS index aec3ca799cedf..399f9804a88cc 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -434,6 +434,7 @@ Makefile* @cilium/build /Documentation/network/servicemesh/ @cilium/sig-servicemesh @cilium/docs-structure /Documentation/observability/ @cilium/sig-policy @cilium/docs-structure /Documentation/observability/hubble* @cilium/sig-hubble @cilium/docs-structure +/Documentation/observability/metrics.rst @cilium/hubble-metrics @cilium/docs-structure /Documentation/operations/performance/ @cilium/sig-datapath @cilium/docs-structure /Documentation/operations/system_requirements.rst @cilium/sig-datapath @cilium/docs-structure /Documentation/operations/troubleshooting_clustermesh.rst @cilium/sig-clustermesh @cilium/docs-structure @@ -558,6 +559,7 @@ Makefile* @cilium/build /pkg/fswatcher/ @cilium/sig-datapath @cilium/sig-hubble /pkg/gops/ @cilium/sig-agent /pkg/health/ @cilium/sig-agent +/pkg/healthconfig/ @cilium/sig-agent /pkg/hive/ @cilium/sig-foundations /pkg/hubble/ @cilium/sig-hubble /pkg/hubble/metrics @cilium/hubble-metrics @@ -636,6 +638,7 @@ Makefile* @cilium/build /pkg/source @cilium/ipcache /pkg/spanstat/ @cilium/sig-agent /pkg/status/ @cilium/sig-agent +/pkg/svcrouteconfig/ @cilium/sig-datapath @cilium/sig-bgp /pkg/testutils/ @cilium/ci-structure /pkg/testutils/scriptnet @cilium/sig-foundations /pkg/time @cilium/sig-agent diff --git a/Documentation/cmdref/cilium-agent.md b/Documentation/cmdref/cilium-agent.md index c609d7849d054..d7181c8a825cd 100644 --- a/Documentation/cmdref/cilium-agent.md +++ b/Documentation/cmdref/cilium-agent.md @@ -143,7 +143,6 @@ cilium-agent [flags] --enable-ip-masq-agent Enable BPF ip-masq-agent --enable-ipip-termination Enable plain IPIP/IP6IP6 termination --enable-ipsec Enable IPsec - --enable-ipsec-encrypted-overlay Enable IPsec encrypted overlay. If enabled tunnel traffic will be encrypted before leaving the host. Requires ipsec and tunnel mode vxlan to be enabled. --enable-ipsec-key-watcher Enable watcher for IPsec key. If disabled, a restart of the agent will be necessary on key rotations. (default true) --enable-ipv4 Enable IPv4 support (default true) --enable-ipv4-big-tcp Enable IPv4 BIG TCP option which increases device's maximum GRO/GSO limits for IPv4 @@ -165,6 +164,7 @@ cilium-agent [flags] --enable-masquerade-to-route-source Masquerade packets to the source IP provided from the routing layer rather than interface address --enable-monitor Enable the monitor unix domain socket server (default true) --enable-nat46x64-gateway Enable NAT46 and NAT64 gateway + --enable-no-service-endpoints-routable Enable routes when service has 0 endpoints (default true) --enable-node-selector-labels Enable use of node label based identity --enable-pmtu-discovery Enable path MTU discovery to send ICMP fragmentation-needed replies to the client --enable-policy string Enable policy enforcement (default "default") @@ -231,6 +231,7 @@ cilium-agent [flags] --hubble-export-file-path stdout Filepath to write Hubble events to. By specifying stdout the flows are logged instead of written to a rotated file. --hubble-flowlogs-config-path string Filepath with configuration of hubble flowlogs --hubble-listen-address string An additional address for Hubble server to listen to, e.g. 
":4244" + --hubble-lost-event-send-interval duration Interval at which lost events are sent from the Observer server, if any. (default 1s) --hubble-metrics string List of Hubble metrics to enable. --hubble-metrics-server string Address to serve Hubble metrics on. --hubble-metrics-server-enable-tls Run the Hubble metrics server on the given listen address with TLS. @@ -263,6 +264,7 @@ cilium-agent [flags] --install-no-conntrack-iptables-rules Install Iptables rules to skip netfilter connection tracking on all pod traffic. This option is only effective when Cilium is running in direct routing and full KPR mode. Moreover, this option cannot be enabled when Cilium is running in a managed Kubernetes environment or in a chained CNI setup. --install-uplink-routes-for-delegated-ipam Install ingress/egress routes through uplink on host for Pods when working with delegated IPAM plugin. --ip-masq-agent-config-path string ip-masq-agent configuration file path (default "/etc/config/ip-masq-agent") + --ip-tracing-option-type uint8 Specifies what IPv4 option type should be used to extract trace information from a packet; a value of 0 (default) disables IP tracing. --ipam string Backend to use for IPAM (default "cluster-pool") --ipam-cilium-node-update-rate duration Maximum rate at which the CiliumNode custom resource is updated (default 15s) --ipam-default-ip-pool string Name of the default IP Pool when using multi-pool (default "default") @@ -375,6 +377,7 @@ cilium-agent [flags] --route-metric int Overwrite the metric used by cilium when adding routes to its 'cilium_host' device --routing-mode string Routing mode ("native" or "tunnel") (default "tunnel") --service-no-backend-response string Response to traffic for a service without backends (default "reject") + --shell-sock-path string Path to the shell UNIX socket (default "/var/run/cilium/shell.sock") --socket-path string Sets daemon's socket path to listen for connections (default "/var/run/cilium/cilium.sock") --standalone-dns-proxy-server-port int Global port on which the gRPC server for standalone DNS proxy should listen (default 40045) --state-dir string Directory path to store runtime state (default "/var/run/cilium") @@ -408,6 +411,7 @@ cilium-agent [flags] --vtep-endpoint strings List of VTEP IP addresses --vtep-mac strings List of VTEP MAC addresses for forwarding traffic outside the cluster --vtep-mask string VTEP CIDR Mask for all VTEP CIDRs (default "255.255.255.0") + --vtep-policy-reconciliation-trigger-interval duration Time between triggers of vtep policy state reconciliations (default 1s) --wireguard-persistent-keepalive duration The Wireguard keepalive interval as a Go duration string --write-cni-conf-when-ready string Write the CNI configuration to the specified path when agent is ready ``` diff --git a/Documentation/cmdref/cilium-agent_hive.md b/Documentation/cmdref/cilium-agent_hive.md index a55011a3f984e..9612ce2c19bfc 100644 --- a/Documentation/cmdref/cilium-agent_hive.md +++ b/Documentation/cmdref/cilium-agent_hive.md @@ -61,15 +61,16 @@ cilium-agent hive [flags] --enable-drift-checker Enables support for config drift checker (default true) --enable-dynamic-config Enables support for dynamic agent config (default true) --enable-dynamic-lifecycle-manager Enables support for dynamic lifecycle management + --enable-endpoint-health-checking Enable connectivity health checking between virtual endpoints (default true) --enable-gateway-api Enables Envoy secret sync for Gateway API related TLS secrets --enable-gops Enable gops server 
(default true) --enable-health-check-nodeport Enables a healthcheck nodePort server for NodePort services with 'healthCheckNodePort' being set (default true) + --enable-health-checking Enable connectivity health checking (default true) --enable-hubble Enable hubble server --enable-hubble-open-metrics Enable exporting hubble metrics in OpenMetrics format. --enable-ingress-controller Enables Envoy secret sync for Ingress controller related TLS secrets --enable-ip-masq-agent Enable BPF ip-masq-agent --enable-ipsec Enable IPsec - --enable-ipsec-encrypted-overlay Enable IPsec encrypted overlay. If enabled tunnel traffic will be encrypted before leaving the host. Requires ipsec and tunnel mode vxlan to be enabled. --enable-ipsec-key-watcher Enable watcher for IPsec key. If disabled, a restart of the agent will be necessary on key rotations. (default true) --enable-ipv4-big-tcp Enable IPv4 BIG TCP option which increases device's maximum GRO/GSO limits for IPv4 --enable-ipv6-big-tcp Enable IPv6 BIG TCP option which increases device's maximum GRO/GSO limits for IPv6 @@ -78,6 +79,7 @@ cilium-agent hive [flags] --enable-l2-neigh-discovery Enables L2 neighbor discovery used by kube-proxy-replacement and IPsec --enable-l2-pod-announcements Enable announcing Pod IPs with Gratuitous ARP and NDP --enable-monitor Enable the monitor unix domain socket server (default true) + --enable-no-service-endpoints-routable Enable routes when service has 0 endpoints (default true) --enable-policy-secrets-sync Enables Envoy secret sync for Secrets used in CiliumNetworkPolicy and CiliumClusterwideNetworkPolicy --enable-route-mtu-for-cni-chaining Enable route MTU for pod netns when CNI chaining is used --enable-service-topology Enable support for service topology aware hints @@ -124,6 +126,7 @@ cilium-agent hive [flags] --hubble-export-file-path stdout Filepath to write Hubble events to. By specifying stdout the flows are logged instead of written to a rotated file. --hubble-flowlogs-config-path string Filepath with configuration of hubble flowlogs --hubble-listen-address string An additional address for Hubble server to listen to, e.g. ":4244" + --hubble-lost-event-send-interval duration Interval at which lost events are sent from the Observer server, if any. (default 1s) --hubble-metrics string List of Hubble metrics to enable. --hubble-metrics-server string Address to serve Hubble metrics on. --hubble-metrics-server-enable-tls Run the Hubble metrics server on the given listen address with TLS. @@ -216,6 +219,7 @@ cilium-agent hive [flags] --proxy-xff-num-trusted-hops-ingress uint32 Number of trusted hops regarding the x-forwarded-for and related HTTP headers for the ingress L7 policy enforcement Envoy listeners. --read-cni-conf string CNI configuration file to use as a source for --write-cni-conf-when-ready. If not supplied, a suitable one will be generated. --restored-proxy-ports-age-limit uint Time after which a restored proxy ports file is considered stale (in minutes) (default 15) + --shell-sock-path string Path to the shell UNIX socket (default "/var/run/cilium/shell.sock") --standalone-dns-proxy-server-port int Global port on which the gRPC server for standalone DNS proxy should listen (default 40045) --static-cnp-path string Directory path to watch and load static cilium network policy yaml files. 
--status-collector-failure-threshold duration The duration after which a probe is considered failed (default 1m0s) @@ -230,6 +234,7 @@ cilium-agent hive [flags] --tunnel-source-port-range string Tunnel source port range hint (default 0-0) (default "0-0") --underlay-protocol string IP family for the underlay ("ipv4" or "ipv6") (default "ipv4") --use-full-tls-context If enabled, persist ca.crt keys into the Envoy config even in a terminatingTLS block on an L7 Cilium Policy. This is to enable compatibility with previously buggy behaviour. This flag is deprecated and will be removed in a future release. + --vtep-policy-reconciliation-trigger-interval duration Time between triggers of vtep policy state reconciliations (default 1s) --wireguard-persistent-keepalive duration The Wireguard keepalive interval as a Go duration string --write-cni-conf-when-ready string Write the CNI configuration to the specified path when agent is ready ``` diff --git a/Documentation/cmdref/cilium-agent_hive_dot-graph.md b/Documentation/cmdref/cilium-agent_hive_dot-graph.md index 8a1be080652b4..7481b254c4e7f 100644 --- a/Documentation/cmdref/cilium-agent_hive_dot-graph.md +++ b/Documentation/cmdref/cilium-agent_hive_dot-graph.md @@ -67,15 +67,16 @@ cilium-agent hive dot-graph [flags] --enable-drift-checker Enables support for config drift checker (default true) --enable-dynamic-config Enables support for dynamic agent config (default true) --enable-dynamic-lifecycle-manager Enables support for dynamic lifecycle management + --enable-endpoint-health-checking Enable connectivity health checking between virtual endpoints (default true) --enable-gateway-api Enables Envoy secret sync for Gateway API related TLS secrets --enable-gops Enable gops server (default true) --enable-health-check-nodeport Enables a healthcheck nodePort server for NodePort services with 'healthCheckNodePort' being set (default true) + --enable-health-checking Enable connectivity health checking (default true) --enable-hubble Enable hubble server --enable-hubble-open-metrics Enable exporting hubble metrics in OpenMetrics format. --enable-ingress-controller Enables Envoy secret sync for Ingress controller related TLS secrets --enable-ip-masq-agent Enable BPF ip-masq-agent --enable-ipsec Enable IPsec - --enable-ipsec-encrypted-overlay Enable IPsec encrypted overlay. If enabled tunnel traffic will be encrypted before leaving the host. Requires ipsec and tunnel mode vxlan to be enabled. --enable-ipsec-key-watcher Enable watcher for IPsec key. If disabled, a restart of the agent will be necessary on key rotations. 
(default true) --enable-ipv4-big-tcp Enable IPv4 BIG TCP option which increases device's maximum GRO/GSO limits for IPv4 --enable-ipv6-big-tcp Enable IPv6 BIG TCP option which increases device's maximum GRO/GSO limits for IPv6 @@ -84,6 +85,7 @@ cilium-agent hive dot-graph [flags] --enable-l2-neigh-discovery Enables L2 neighbor discovery used by kube-proxy-replacement and IPsec --enable-l2-pod-announcements Enable announcing Pod IPs with Gratuitous ARP and NDP --enable-monitor Enable the monitor unix domain socket server (default true) + --enable-no-service-endpoints-routable Enable routes when service has 0 endpoints (default true) --enable-policy-secrets-sync Enables Envoy secret sync for Secrets used in CiliumNetworkPolicy and CiliumClusterwideNetworkPolicy --enable-route-mtu-for-cni-chaining Enable route MTU for pod netns when CNI chaining is used --enable-service-topology Enable support for service topology aware hints @@ -129,6 +131,7 @@ cilium-agent hive dot-graph [flags] --hubble-export-file-path stdout Filepath to write Hubble events to. By specifying stdout the flows are logged instead of written to a rotated file. --hubble-flowlogs-config-path string Filepath with configuration of hubble flowlogs --hubble-listen-address string An additional address for Hubble server to listen to, e.g. ":4244" + --hubble-lost-event-send-interval duration Interval at which lost events are sent from the Observer server, if any. (default 1s) --hubble-metrics string List of Hubble metrics to enable. --hubble-metrics-server string Address to serve Hubble metrics on. --hubble-metrics-server-enable-tls Run the Hubble metrics server on the given listen address with TLS. @@ -221,6 +224,7 @@ cilium-agent hive dot-graph [flags] --proxy-xff-num-trusted-hops-ingress uint32 Number of trusted hops regarding the x-forwarded-for and related HTTP headers for the ingress L7 policy enforcement Envoy listeners. --read-cni-conf string CNI configuration file to use as a source for --write-cni-conf-when-ready. If not supplied, a suitable one will be generated. --restored-proxy-ports-age-limit uint Time after which a restored proxy ports file is considered stale (in minutes) (default 15) + --shell-sock-path string Path to the shell UNIX socket (default "/var/run/cilium/shell.sock") --standalone-dns-proxy-server-port int Global port on which the gRPC server for standalone DNS proxy should listen (default 40045) --static-cnp-path string Directory path to watch and load static cilium network policy yaml files. --status-collector-failure-threshold duration The duration after which a probe is considered failed (default 1m0s) @@ -235,6 +239,7 @@ cilium-agent hive dot-graph [flags] --tunnel-source-port-range string Tunnel source port range hint (default 0-0) (default "0-0") --underlay-protocol string IP family for the underlay ("ipv4" or "ipv6") (default "ipv4") --use-full-tls-context If enabled, persist ca.crt keys into the Envoy config even in a terminatingTLS block on an L7 Cilium Policy. This is to enable compatibility with previously buggy behaviour. This flag is deprecated and will be removed in a future release. 
+ --vtep-policy-reconciliation-trigger-interval duration Time between triggers of vtep policy state reconciliations (default 1s) --wireguard-persistent-keepalive duration The Wireguard keepalive interval as a Go duration string --write-cni-conf-when-ready string Write the CNI configuration to the specified path when agent is ready ``` diff --git a/Documentation/cmdref/cilium-dbg_bpf.md b/Documentation/cmdref/cilium-dbg_bpf.md index ff6b015d73d6e..b5e6ae0f93f3b 100644 --- a/Documentation/cmdref/cilium-dbg_bpf.md +++ b/Documentation/cmdref/cilium-dbg_bpf.md @@ -43,4 +43,5 @@ Direct access to local BPF maps * [cilium-dbg bpf socknat](cilium-dbg_bpf_socknat.md) - Socket NAT operations * [cilium-dbg bpf srv6](cilium-dbg_bpf_srv6.md) - Manage the SRv6 routing rules * [cilium-dbg bpf vtep](cilium-dbg_bpf_vtep.md) - Manage the VTEP mappings for IP/CIDR <-> VTEP MAC/IP +* [cilium-dbg bpf vtep-policy](cilium-dbg_bpf_vtep-policy.md) - Manage the VTEP Policy mappings diff --git a/Documentation/cmdref/cilium-dbg_bpf_vtep-policy.md b/Documentation/cmdref/cilium-dbg_bpf_vtep-policy.md new file mode 100644 index 0000000000000..fb5947ed6552c --- /dev/null +++ b/Documentation/cmdref/cilium-dbg_bpf_vtep-policy.md @@ -0,0 +1,29 @@ + + +## cilium-dbg bpf vtep-policy + +Manage the VTEP Policy mappings + +### Options + +``` + -h, --help help for vtep-policy +``` + +### Options inherited from parent commands + +``` + --config string Config file (default is $HOME/.cilium.yaml) + -D, --debug Enable debug messages + -H, --host string URI to server-side API + --log-driver strings Logging endpoints to use (example: syslog) + --log-opt map Log driver options (example: format=json) +``` + +### SEE ALSO + +* [cilium-dbg bpf](cilium-dbg_bpf.md) - Direct access to local BPF maps +* [cilium-dbg bpf vtep-policy delete](cilium-dbg_bpf_vtep-policy_delete.md) - Delete VTEP Policy entries +* [cilium-dbg bpf vtep-policy list](cilium-dbg_bpf_vtep-policy_list.md) - List VTEP Policy entries +* [cilium-dbg bpf vtep-policy update](cilium-dbg_bpf_vtep-policy_update.md) - Update VTEP Policy entries + diff --git a/Documentation/cmdref/cilium-dbg_bpf_vtep-policy_delete.md b/Documentation/cmdref/cilium-dbg_bpf_vtep-policy_delete.md new file mode 100644 index 0000000000000..0d6aa4f2c9af4 --- /dev/null +++ b/Documentation/cmdref/cilium-dbg_bpf_vtep-policy_delete.md @@ -0,0 +1,35 @@ + + +## cilium-dbg bpf vtep-policy delete + +Delete VTEP Policy entries + +### Synopsis + +Delete vtep entries using vtep CIDR. + + +``` +cilium-dbg bpf vtep-policy delete [flags] +``` + +### Options + +``` + -h, --help help for delete +``` + +### Options inherited from parent commands + +``` + --config string Config file (default is $HOME/.cilium.yaml) + -D, --debug Enable debug messages + -H, --host string URI to server-side API + --log-driver strings Logging endpoints to use (example: syslog) + --log-opt map Log driver options (example: format=json) +``` + +### SEE ALSO + +* [cilium-dbg bpf vtep-policy](cilium-dbg_bpf_vtep-policy.md) - Manage the VTEP Policy mappings + diff --git a/Documentation/cmdref/cilium-dbg_bpf_vtep-policy_list.md b/Documentation/cmdref/cilium-dbg_bpf_vtep-policy_list.md new file mode 100644 index 0000000000000..aeb65423bd8ec --- /dev/null +++ b/Documentation/cmdref/cilium-dbg_bpf_vtep-policy_list.md @@ -0,0 +1,36 @@ + + +## cilium-dbg bpf vtep-policy list + +List VTEP Policy entries + +### Synopsis + +List VTEP CIDR and their corresponding VTEP MAC/IP. 
+ + +``` +cilium-dbg bpf vtep-policy list [flags] +``` + +### Options + +``` + -h, --help help for list + -o, --output string json| yaml| jsonpath='{}' +``` + +### Options inherited from parent commands + +``` + --config string Config file (default is $HOME/.cilium.yaml) + -D, --debug Enable debug messages + -H, --host string URI to server-side API + --log-driver strings Logging endpoints to use (example: syslog) + --log-opt map Log driver options (example: format=json) +``` + +### SEE ALSO + +* [cilium-dbg bpf vtep-policy](cilium-dbg_bpf_vtep-policy.md) - Manage the VTEP Policy mappings + diff --git a/Documentation/cmdref/cilium-dbg_bpf_vtep-policy_update.md b/Documentation/cmdref/cilium-dbg_bpf_vtep-policy_update.md new file mode 100644 index 0000000000000..2038f1b7ccceb --- /dev/null +++ b/Documentation/cmdref/cilium-dbg_bpf_vtep-policy_update.md @@ -0,0 +1,35 @@ + + +## cilium-dbg bpf vtep-policy update + +Update VTEP Policy entries + +### Synopsis + +Create/Update vtep entry. + + +``` +cilium-dbg bpf vtep-policy update [flags] +``` + +### Options + +``` + -h, --help help for update +``` + +### Options inherited from parent commands + +``` + --config string Config file (default is $HOME/.cilium.yaml) + -D, --debug Enable debug messages + -H, --host string URI to server-side API + --log-driver strings Logging endpoints to use (example: syslog) + --log-opt map Log driver options (example: format=json) +``` + +### SEE ALSO + +* [cilium-dbg bpf vtep-policy](cilium-dbg_bpf_vtep-policy.md) - Manage the VTEP Policy mappings + diff --git a/Documentation/cmdref/cilium-dbg_metrics_list.md b/Documentation/cmdref/cilium-dbg_metrics_list.md index 45e9ce28085f2..a9fdbbbae0d87 100644 --- a/Documentation/cmdref/cilium-dbg_metrics_list.md +++ b/Documentation/cmdref/cilium-dbg_metrics_list.md @@ -11,9 +11,10 @@ cilium-dbg metrics list [flags] ### Options ``` - -h, --help help for list - -p, --match-pattern string Show only metrics whose names match matchpattern - -o, --output string json| yaml| jsonpath='{}' + -h, --help help for list + -p, --match-pattern string Show only metrics whose names match matchpattern + -o, --output string json| yaml| jsonpath='{}' + --shell-sock-path string Path to the shell UNIX socket (default "/var/run/cilium/shell.sock") ``` ### Options inherited from parent commands diff --git a/Documentation/cmdref/cilium-operator-alibabacloud.md b/Documentation/cmdref/cilium-operator-alibabacloud.md index 4739f8b6be1f9..baddb7657c844 100644 --- a/Documentation/cmdref/cilium-operator-alibabacloud.md +++ b/Documentation/cmdref/cilium-operator-alibabacloud.md @@ -130,10 +130,11 @@ cilium-operator-alibabacloud [flags] --remove-cilium-node-taints Remove node taint "node.cilium.io/agent-not-ready" from Kubernetes nodes once Cilium is up and running (default true) --set-cilium-is-up-condition Set CiliumIsUp Node condition to mark a Kubernetes Node that a Cilium pod is up and running in that node (default true) --set-cilium-node-taints Set node taint "node.cilium.io/agent-not-ready" from Kubernetes nodes if Cilium is scheduled but not up and running + --shell-sock-path string Path to the shell UNIX socket (default "/var/run/cilium/shell.sock") --skip-crd-creation When true, Kubernetes Custom Resource Definitions will not be created --subnet-ids-filter strings Subnets IDs (separated by commas) --subnet-tags-filter map Subnets tags in the form of k1=v1,k2=v2 (multiple k/v pairs can also be passed by repeating the CLI flag - --synchronize-k8s-nodes Synchronize Kubernetes nodes to kvstore and 
perform CNP GC (default true) + --synchronize-k8s-nodes Perform GC of stale node entries from the KVStore (default true) --synchronize-k8s-services Synchronize Kubernetes services to kvstore (default true) --taint-sync-workers int Number of workers used to synchronize node tains and conditions (default 10) --unmanaged-pod-watcher-interval int Interval to check for unmanaged kube-dns pods (0 to disable) (default 15) diff --git a/Documentation/cmdref/cilium-operator-alibabacloud_hive.md b/Documentation/cmdref/cilium-operator-alibabacloud_hive.md index c2ab07ffbff20..b34483e6efb6d 100644 --- a/Documentation/cmdref/cilium-operator-alibabacloud_hive.md +++ b/Documentation/cmdref/cilium-operator-alibabacloud_hive.md @@ -94,7 +94,9 @@ cilium-operator-alibabacloud hive [flags] --operator-prometheus-serve-addr string Address to serve Prometheus metrics (default ":9963") --policy-default-local-cluster Control whether policy rules assume by default the local cluster if not explicitly selected (default true) --policy-secrets-namespace string Namespace where secrets used in TLS Interception will be synced to. (default "cilium-secrets") + --shell-sock-path string Path to the shell UNIX socket (default "/var/run/cilium/shell.sock") --skip-crd-creation When true, Kubernetes Custom Resource Definitions will not be created + --synchronize-k8s-nodes Perform GC of stale node entries from the KVStore (default true) --validate-network-policy Whether to enable or disable the informational network policy validator (default true) ``` diff --git a/Documentation/cmdref/cilium-operator-alibabacloud_hive_dot-graph.md b/Documentation/cmdref/cilium-operator-alibabacloud_hive_dot-graph.md index 20e4513e70e2d..8a4b7b01c9ad5 100644 --- a/Documentation/cmdref/cilium-operator-alibabacloud_hive_dot-graph.md +++ b/Documentation/cmdref/cilium-operator-alibabacloud_hive_dot-graph.md @@ -99,7 +99,9 @@ cilium-operator-alibabacloud hive dot-graph [flags] --operator-prometheus-serve-addr string Address to serve Prometheus metrics (default ":9963") --policy-default-local-cluster Control whether policy rules assume by default the local cluster if not explicitly selected (default true) --policy-secrets-namespace string Namespace where secrets used in TLS Interception will be synced to. 
(default "cilium-secrets") + --shell-sock-path string Path to the shell UNIX socket (default "/var/run/cilium/shell.sock") --skip-crd-creation When true, Kubernetes Custom Resource Definitions will not be created + --synchronize-k8s-nodes Perform GC of stale node entries from the KVStore (default true) --validate-network-policy Whether to enable or disable the informational network policy validator (default true) ``` diff --git a/Documentation/cmdref/cilium-operator-aws.md b/Documentation/cmdref/cilium-operator-aws.md index 4043458e6f147..c7a494daba5aa 100644 --- a/Documentation/cmdref/cilium-operator-aws.md +++ b/Documentation/cmdref/cilium-operator-aws.md @@ -138,10 +138,11 @@ cilium-operator-aws [flags] --remove-cilium-node-taints Remove node taint "node.cilium.io/agent-not-ready" from Kubernetes nodes once Cilium is up and running (default true) --set-cilium-is-up-condition Set CiliumIsUp Node condition to mark a Kubernetes Node that a Cilium pod is up and running in that node (default true) --set-cilium-node-taints Set node taint "node.cilium.io/agent-not-ready" from Kubernetes nodes if Cilium is scheduled but not up and running + --shell-sock-path string Path to the shell UNIX socket (default "/var/run/cilium/shell.sock") --skip-crd-creation When true, Kubernetes Custom Resource Definitions will not be created --subnet-ids-filter strings Subnets IDs (separated by commas) --subnet-tags-filter map Subnets tags in the form of k1=v1,k2=v2 (multiple k/v pairs can also be passed by repeating the CLI flag - --synchronize-k8s-nodes Synchronize Kubernetes nodes to kvstore and perform CNP GC (default true) + --synchronize-k8s-nodes Perform GC of stale node entries from the KVStore (default true) --synchronize-k8s-services Synchronize Kubernetes services to kvstore (default true) --taint-sync-workers int Number of workers used to synchronize node tains and conditions (default 10) --unmanaged-pod-watcher-interval int Interval to check for unmanaged kube-dns pods (0 to disable) (default 15) diff --git a/Documentation/cmdref/cilium-operator-aws_hive.md b/Documentation/cmdref/cilium-operator-aws_hive.md index 235ca0a751bc0..52e8c4e8b7c67 100644 --- a/Documentation/cmdref/cilium-operator-aws_hive.md +++ b/Documentation/cmdref/cilium-operator-aws_hive.md @@ -94,7 +94,9 @@ cilium-operator-aws hive [flags] --operator-prometheus-serve-addr string Address to serve Prometheus metrics (default ":9963") --policy-default-local-cluster Control whether policy rules assume by default the local cluster if not explicitly selected (default true) --policy-secrets-namespace string Namespace where secrets used in TLS Interception will be synced to. 
(default "cilium-secrets") + --shell-sock-path string Path to the shell UNIX socket (default "/var/run/cilium/shell.sock") --skip-crd-creation When true, Kubernetes Custom Resource Definitions will not be created + --synchronize-k8s-nodes Perform GC of stale node entries from the KVStore (default true) --validate-network-policy Whether to enable or disable the informational network policy validator (default true) ``` diff --git a/Documentation/cmdref/cilium-operator-aws_hive_dot-graph.md b/Documentation/cmdref/cilium-operator-aws_hive_dot-graph.md index 22062b6c8c64d..8b5e810b152f8 100644 --- a/Documentation/cmdref/cilium-operator-aws_hive_dot-graph.md +++ b/Documentation/cmdref/cilium-operator-aws_hive_dot-graph.md @@ -99,7 +99,9 @@ cilium-operator-aws hive dot-graph [flags] --operator-prometheus-serve-addr string Address to serve Prometheus metrics (default ":9963") --policy-default-local-cluster Control whether policy rules assume by default the local cluster if not explicitly selected (default true) --policy-secrets-namespace string Namespace where secrets used in TLS Interception will be synced to. (default "cilium-secrets") + --shell-sock-path string Path to the shell UNIX socket (default "/var/run/cilium/shell.sock") --skip-crd-creation When true, Kubernetes Custom Resource Definitions will not be created + --synchronize-k8s-nodes Perform GC of stale node entries from the KVStore (default true) --validate-network-policy Whether to enable or disable the informational network policy validator (default true) ``` diff --git a/Documentation/cmdref/cilium-operator-azure.md b/Documentation/cmdref/cilium-operator-azure.md index e41f52d93a953..44825713306c9 100644 --- a/Documentation/cmdref/cilium-operator-azure.md +++ b/Documentation/cmdref/cilium-operator-azure.md @@ -133,10 +133,11 @@ cilium-operator-azure [flags] --remove-cilium-node-taints Remove node taint "node.cilium.io/agent-not-ready" from Kubernetes nodes once Cilium is up and running (default true) --set-cilium-is-up-condition Set CiliumIsUp Node condition to mark a Kubernetes Node that a Cilium pod is up and running in that node (default true) --set-cilium-node-taints Set node taint "node.cilium.io/agent-not-ready" from Kubernetes nodes if Cilium is scheduled but not up and running + --shell-sock-path string Path to the shell UNIX socket (default "/var/run/cilium/shell.sock") --skip-crd-creation When true, Kubernetes Custom Resource Definitions will not be created --subnet-ids-filter strings Subnets IDs (separated by commas) --subnet-tags-filter map Subnets tags in the form of k1=v1,k2=v2 (multiple k/v pairs can also be passed by repeating the CLI flag - --synchronize-k8s-nodes Synchronize Kubernetes nodes to kvstore and perform CNP GC (default true) + --synchronize-k8s-nodes Perform GC of stale node entries from the KVStore (default true) --synchronize-k8s-services Synchronize Kubernetes services to kvstore (default true) --taint-sync-workers int Number of workers used to synchronize node tains and conditions (default 10) --unmanaged-pod-watcher-interval int Interval to check for unmanaged kube-dns pods (0 to disable) (default 15) diff --git a/Documentation/cmdref/cilium-operator-azure_hive.md b/Documentation/cmdref/cilium-operator-azure_hive.md index eccde5deeb34f..dba7c53292297 100644 --- a/Documentation/cmdref/cilium-operator-azure_hive.md +++ b/Documentation/cmdref/cilium-operator-azure_hive.md @@ -94,7 +94,9 @@ cilium-operator-azure hive [flags] --operator-prometheus-serve-addr string Address to serve Prometheus metrics 
(default ":9963") --policy-default-local-cluster Control whether policy rules assume by default the local cluster if not explicitly selected (default true) --policy-secrets-namespace string Namespace where secrets used in TLS Interception will be synced to. (default "cilium-secrets") + --shell-sock-path string Path to the shell UNIX socket (default "/var/run/cilium/shell.sock") --skip-crd-creation When true, Kubernetes Custom Resource Definitions will not be created + --synchronize-k8s-nodes Perform GC of stale node entries from the KVStore (default true) --validate-network-policy Whether to enable or disable the informational network policy validator (default true) ``` diff --git a/Documentation/cmdref/cilium-operator-azure_hive_dot-graph.md b/Documentation/cmdref/cilium-operator-azure_hive_dot-graph.md index 13a30143ce2fa..a7dd0f8ff8353 100644 --- a/Documentation/cmdref/cilium-operator-azure_hive_dot-graph.md +++ b/Documentation/cmdref/cilium-operator-azure_hive_dot-graph.md @@ -99,7 +99,9 @@ cilium-operator-azure hive dot-graph [flags] --operator-prometheus-serve-addr string Address to serve Prometheus metrics (default ":9963") --policy-default-local-cluster Control whether policy rules assume by default the local cluster if not explicitly selected (default true) --policy-secrets-namespace string Namespace where secrets used in TLS Interception will be synced to. (default "cilium-secrets") + --shell-sock-path string Path to the shell UNIX socket (default "/var/run/cilium/shell.sock") --skip-crd-creation When true, Kubernetes Custom Resource Definitions will not be created + --synchronize-k8s-nodes Perform GC of stale node entries from the KVStore (default true) --validate-network-policy Whether to enable or disable the informational network policy validator (default true) ``` diff --git a/Documentation/cmdref/cilium-operator-generic.md b/Documentation/cmdref/cilium-operator-generic.md index a3364cb26cfe5..16edc4354b778 100644 --- a/Documentation/cmdref/cilium-operator-generic.md +++ b/Documentation/cmdref/cilium-operator-generic.md @@ -129,10 +129,11 @@ cilium-operator-generic [flags] --remove-cilium-node-taints Remove node taint "node.cilium.io/agent-not-ready" from Kubernetes nodes once Cilium is up and running (default true) --set-cilium-is-up-condition Set CiliumIsUp Node condition to mark a Kubernetes Node that a Cilium pod is up and running in that node (default true) --set-cilium-node-taints Set node taint "node.cilium.io/agent-not-ready" from Kubernetes nodes if Cilium is scheduled but not up and running + --shell-sock-path string Path to the shell UNIX socket (default "/var/run/cilium/shell.sock") --skip-crd-creation When true, Kubernetes Custom Resource Definitions will not be created --subnet-ids-filter strings Subnets IDs (separated by commas) --subnet-tags-filter map Subnets tags in the form of k1=v1,k2=v2 (multiple k/v pairs can also be passed by repeating the CLI flag - --synchronize-k8s-nodes Synchronize Kubernetes nodes to kvstore and perform CNP GC (default true) + --synchronize-k8s-nodes Perform GC of stale node entries from the KVStore (default true) --synchronize-k8s-services Synchronize Kubernetes services to kvstore (default true) --taint-sync-workers int Number of workers used to synchronize node tains and conditions (default 10) --unmanaged-pod-watcher-interval int Interval to check for unmanaged kube-dns pods (0 to disable) (default 15) diff --git a/Documentation/cmdref/cilium-operator-generic_hive.md b/Documentation/cmdref/cilium-operator-generic_hive.md index 
8531f3f496a5d..5201b73cac5b4 100644 --- a/Documentation/cmdref/cilium-operator-generic_hive.md +++ b/Documentation/cmdref/cilium-operator-generic_hive.md @@ -94,7 +94,9 @@ cilium-operator-generic hive [flags] --operator-prometheus-serve-addr string Address to serve Prometheus metrics (default ":9963") --policy-default-local-cluster Control whether policy rules assume by default the local cluster if not explicitly selected (default true) --policy-secrets-namespace string Namespace where secrets used in TLS Interception will be synced to. (default "cilium-secrets") + --shell-sock-path string Path to the shell UNIX socket (default "/var/run/cilium/shell.sock") --skip-crd-creation When true, Kubernetes Custom Resource Definitions will not be created + --synchronize-k8s-nodes Perform GC of stale node entries from the KVStore (default true) --validate-network-policy Whether to enable or disable the informational network policy validator (default true) ``` diff --git a/Documentation/cmdref/cilium-operator-generic_hive_dot-graph.md b/Documentation/cmdref/cilium-operator-generic_hive_dot-graph.md index b2fdff1c78b94..b8955777ad55a 100644 --- a/Documentation/cmdref/cilium-operator-generic_hive_dot-graph.md +++ b/Documentation/cmdref/cilium-operator-generic_hive_dot-graph.md @@ -99,7 +99,9 @@ cilium-operator-generic hive dot-graph [flags] --operator-prometheus-serve-addr string Address to serve Prometheus metrics (default ":9963") --policy-default-local-cluster Control whether policy rules assume by default the local cluster if not explicitly selected (default true) --policy-secrets-namespace string Namespace where secrets used in TLS Interception will be synced to. (default "cilium-secrets") + --shell-sock-path string Path to the shell UNIX socket (default "/var/run/cilium/shell.sock") --skip-crd-creation When true, Kubernetes Custom Resource Definitions will not be created + --synchronize-k8s-nodes Perform GC of stale node entries from the KVStore (default true) --validate-network-policy Whether to enable or disable the informational network policy validator (default true) ``` diff --git a/Documentation/cmdref/cilium-operator.md b/Documentation/cmdref/cilium-operator.md index 11f8d5ec13690..1e8c855f90e69 100644 --- a/Documentation/cmdref/cilium-operator.md +++ b/Documentation/cmdref/cilium-operator.md @@ -143,10 +143,11 @@ cilium-operator [flags] --remove-cilium-node-taints Remove node taint "node.cilium.io/agent-not-ready" from Kubernetes nodes once Cilium is up and running (default true) --set-cilium-is-up-condition Set CiliumIsUp Node condition to mark a Kubernetes Node that a Cilium pod is up and running in that node (default true) --set-cilium-node-taints Set node taint "node.cilium.io/agent-not-ready" from Kubernetes nodes if Cilium is scheduled but not up and running + --shell-sock-path string Path to the shell UNIX socket (default "/var/run/cilium/shell.sock") --skip-crd-creation When true, Kubernetes Custom Resource Definitions will not be created --subnet-ids-filter strings Subnets IDs (separated by commas) --subnet-tags-filter map Subnets tags in the form of k1=v1,k2=v2 (multiple k/v pairs can also be passed by repeating the CLI flag - --synchronize-k8s-nodes Synchronize Kubernetes nodes to kvstore and perform CNP GC (default true) + --synchronize-k8s-nodes Perform GC of stale node entries from the KVStore (default true) --synchronize-k8s-services Synchronize Kubernetes services to kvstore (default true) --taint-sync-workers int Number of workers used to synchronize node tains and 
conditions (default 10) --unmanaged-pod-watcher-interval int Interval to check for unmanaged kube-dns pods (0 to disable) (default 15) diff --git a/Documentation/cmdref/cilium-operator_hive.md b/Documentation/cmdref/cilium-operator_hive.md index ceecee9422b5f..87ca26bf27ab3 100644 --- a/Documentation/cmdref/cilium-operator_hive.md +++ b/Documentation/cmdref/cilium-operator_hive.md @@ -94,7 +94,9 @@ cilium-operator hive [flags] --operator-prometheus-serve-addr string Address to serve Prometheus metrics (default ":9963") --policy-default-local-cluster Control whether policy rules assume by default the local cluster if not explicitly selected (default true) --policy-secrets-namespace string Namespace where secrets used in TLS Interception will be synced to. (default "cilium-secrets") + --shell-sock-path string Path to the shell UNIX socket (default "/var/run/cilium/shell.sock") --skip-crd-creation When true, Kubernetes Custom Resource Definitions will not be created + --synchronize-k8s-nodes Perform GC of stale node entries from the KVStore (default true) --validate-network-policy Whether to enable or disable the informational network policy validator (default true) ``` diff --git a/Documentation/cmdref/cilium-operator_hive_dot-graph.md b/Documentation/cmdref/cilium-operator_hive_dot-graph.md index 50ccea7dbfcce..749c78b4f01df 100644 --- a/Documentation/cmdref/cilium-operator_hive_dot-graph.md +++ b/Documentation/cmdref/cilium-operator_hive_dot-graph.md @@ -99,7 +99,9 @@ cilium-operator hive dot-graph [flags] --operator-prometheus-serve-addr string Address to serve Prometheus metrics (default ":9963") --policy-default-local-cluster Control whether policy rules assume by default the local cluster if not explicitly selected (default true) --policy-secrets-namespace string Namespace where secrets used in TLS Interception will be synced to. 
(default "cilium-secrets") + --shell-sock-path string Path to the shell UNIX socket (default "/var/run/cilium/shell.sock") --skip-crd-creation When true, Kubernetes Custom Resource Definitions will not be created + --synchronize-k8s-nodes Perform GC of stale node entries from the KVStore (default true) --validate-network-policy Whether to enable or disable the informational network policy validator (default true) ``` diff --git a/Documentation/cmdref/clustermesh-apiserver_clustermesh.md b/Documentation/cmdref/clustermesh-apiserver_clustermesh.md index 2e1b06d2d8195..a36bdfd0be67a 100644 --- a/Documentation/cmdref/clustermesh-apiserver_clustermesh.md +++ b/Documentation/cmdref/clustermesh-apiserver_clustermesh.md @@ -47,6 +47,7 @@ clustermesh-apiserver clustermesh [flags] --pprof-mutex-profile-fraction int Enable mutex contention profiling and set the fraction of sampled events (set to 1 to sample all events) --pprof-port uint16 Port that pprof listens on (default 6063) --prometheus-serve-addr string Address to serve Prometheus metrics + --shell-sock-path string Path to the shell UNIX socket (default "/var/run/cilium/shell.sock") ``` ### SEE ALSO diff --git a/Documentation/cmdref/clustermesh-apiserver_clustermesh_hive.md b/Documentation/cmdref/clustermesh-apiserver_clustermesh_hive.md index 9a75f228709f4..a4b3072d3ec20 100644 --- a/Documentation/cmdref/clustermesh-apiserver_clustermesh_hive.md +++ b/Documentation/cmdref/clustermesh-apiserver_clustermesh_hive.md @@ -47,6 +47,7 @@ clustermesh-apiserver clustermesh hive [flags] --pprof-mutex-profile-fraction int Enable mutex contention profiling and set the fraction of sampled events (set to 1 to sample all events) --pprof-port uint16 Port that pprof listens on (default 6063) --prometheus-serve-addr string Address to serve Prometheus metrics + --shell-sock-path string Path to the shell UNIX socket (default "/var/run/cilium/shell.sock") ``` ### SEE ALSO diff --git a/Documentation/cmdref/clustermesh-apiserver_clustermesh_hive_dot-graph.md b/Documentation/cmdref/clustermesh-apiserver_clustermesh_hive_dot-graph.md index 301a04eb6edff..a692e7000c6b4 100644 --- a/Documentation/cmdref/clustermesh-apiserver_clustermesh_hive_dot-graph.md +++ b/Documentation/cmdref/clustermesh-apiserver_clustermesh_hive_dot-graph.md @@ -52,6 +52,7 @@ clustermesh-apiserver clustermesh hive dot-graph [flags] --pprof-mutex-profile-fraction int Enable mutex contention profiling and set the fraction of sampled events (set to 1 to sample all events) --pprof-port uint16 Port that pprof listens on (default 6063) --prometheus-serve-addr string Address to serve Prometheus metrics + --shell-sock-path string Path to the shell UNIX socket (default "/var/run/cilium/shell.sock") ``` ### SEE ALSO diff --git a/Documentation/cmdref/clustermesh-apiserver_kvstoremesh.md b/Documentation/cmdref/clustermesh-apiserver_kvstoremesh.md index ea9933433204f..c7e85adf85e55 100644 --- a/Documentation/cmdref/clustermesh-apiserver_kvstoremesh.md +++ b/Documentation/cmdref/clustermesh-apiserver_kvstoremesh.md @@ -37,6 +37,7 @@ clustermesh-apiserver kvstoremesh [flags] --pprof-mutex-profile-fraction int Enable mutex contention profiling and set the fraction of sampled events (set to 1 to sample all events) --pprof-port uint16 Port that pprof listens on (default 6064) --prometheus-serve-addr string Address to serve Prometheus metrics + --shell-sock-path string Path to the shell UNIX socket (default "/var/run/cilium/shell.sock") ``` ### SEE ALSO diff --git 
a/Documentation/cmdref/clustermesh-apiserver_kvstoremesh_hive.md b/Documentation/cmdref/clustermesh-apiserver_kvstoremesh_hive.md index c8eb6bf3865b0..bfb1116b10739 100644 --- a/Documentation/cmdref/clustermesh-apiserver_kvstoremesh_hive.md +++ b/Documentation/cmdref/clustermesh-apiserver_kvstoremesh_hive.md @@ -37,6 +37,7 @@ clustermesh-apiserver kvstoremesh hive [flags] --pprof-mutex-profile-fraction int Enable mutex contention profiling and set the fraction of sampled events (set to 1 to sample all events) --pprof-port uint16 Port that pprof listens on (default 6064) --prometheus-serve-addr string Address to serve Prometheus metrics + --shell-sock-path string Path to the shell UNIX socket (default "/var/run/cilium/shell.sock") ``` ### SEE ALSO diff --git a/Documentation/cmdref/clustermesh-apiserver_kvstoremesh_hive_dot-graph.md b/Documentation/cmdref/clustermesh-apiserver_kvstoremesh_hive_dot-graph.md index 1a444c8498ac6..e60610c78bba6 100644 --- a/Documentation/cmdref/clustermesh-apiserver_kvstoremesh_hive_dot-graph.md +++ b/Documentation/cmdref/clustermesh-apiserver_kvstoremesh_hive_dot-graph.md @@ -42,6 +42,7 @@ clustermesh-apiserver kvstoremesh hive dot-graph [flags] --pprof-mutex-profile-fraction int Enable mutex contention profiling and set the fraction of sampled events (set to 1 to sample all events) --pprof-port uint16 Port that pprof listens on (default 6064) --prometheus-serve-addr string Address to serve Prometheus metrics + --shell-sock-path string Path to the shell UNIX socket (default "/var/run/cilium/shell.sock") ``` ### SEE ALSO diff --git a/Documentation/configuration/sbom.rst b/Documentation/configuration/sbom.rst index 06ed352a313d2..c7fb9311a1469 100644 --- a/Documentation/configuration/sbom.rst +++ b/Documentation/configuration/sbom.rst @@ -16,10 +16,10 @@ insight into the software supply chain and any potential concerns related to license compliance and security that might exist. The Cilium SBOM is generated using the `syft`_ tool. To learn more about SBOM, see -`what an SBOM can do for you`_. +`what is an SBOM`_. .. _`syft`: https://github.com/anchore/syft -.. _`what an SBOM can do for you`: https://www.chainguard.dev/unchained/what-an-sbom-can-do-for-you +.. _`what is an SBOM`: https://edu.chainguard.dev/open-source/sbom/what-is-an-sbom/ Prerequisites ============= diff --git a/Documentation/contributing/development/datapath_config.rst b/Documentation/contributing/development/datapath_config.rst index 8392fbf8c69ca..2913a4271d064 100644 --- a/Documentation/contributing/development/datapath_config.rst +++ b/Documentation/contributing/development/datapath_config.rst @@ -169,11 +169,11 @@ This will show up in the Go scaffolding as: Foo uint64 `config:"foo"` } -Populate it in the agent through ``pkg/datapath/loader.nodeConfig()``: +Populate it in the agent through ``pkg/datapath/config.NodeConfig()``: .. code-block:: go - func nodeConfig(lnc *datapath.LocalNodeConfiguration) config.Node { + func NodeConfig(lnc *datapath.LocalNodeConfiguration) Node { ... node.Foo = 42 ... 
diff --git a/Documentation/crdlist.rst b/Documentation/crdlist.rst index 971eafd541e51..6830a62635db4 100644 --- a/Documentation/crdlist.rst +++ b/Documentation/crdlist.rst @@ -20,3 +20,4 @@ - CiliumNode - CiliumNodeConfig - CiliumPodIPPool +- CiliumVtepPolicy diff --git a/Documentation/helm-values.rst b/Documentation/helm-values.rst index 1ce1809be9c83..78d775094cfed 100644 --- a/Documentation/helm-values.rst +++ b/Documentation/helm-values.rst @@ -845,7 +845,7 @@ - list - ``[]`` * - :spelling:ignore:`clustermesh.apiserver.service.nodePort` - - Optional port to use as the node port for apiserver access. WARNING: make sure to configure a different NodePort in each cluster if kube-proxy replacement is enabled, as Cilium is currently affected by a known bug (#24692) when NodePorts are handled by the KPR implementation. If a service with the same NodePort exists both in the local and the remote cluster, all traffic originating from inside the cluster and targeting the corresponding NodePort will be redirected to a local backend, regardless of whether the destination node belongs to the local or the remote cluster. + - Optional port to use as the node port for apiserver access. - int - ``32379`` * - :spelling:ignore:`clustermesh.apiserver.service.type` @@ -1256,6 +1256,10 @@ - Enables masquerading to the source of the route for traffic leaving the node from endpoints. - bool - ``false`` + * - :spelling:ignore:`enableNoServiceEndpointsRoutable` + - Enable routing to a service that has zero endpoints + - bool + - ``true`` * - :spelling:ignore:`enableNonDefaultDenyPolicies` - Enable Non-Default-Deny policies - bool @@ -2947,7 +2951,7 @@ * - :spelling:ignore:`nodeinit.image` - node-init image. - object - - ``{"digest":"sha256:0c91245afb3a4ff78b5cc8c09226806e94a9a10eb0adb74a85e0eeed2a5cae8c","override":null,"pullPolicy":"Always","repository":"quay.io/cilium/startup-script","tag":"1755531540-60ee83e","useDigest":true}`` + - ``{"digest":"sha256:5bdca3c2dec2c79f58d45a7a560bf1098c2126350c901379fe850b7f78d3d757","override":null,"pullPolicy":"Always","repository":"quay.io/cilium/startup-script","tag":"1755531540-60ee83e","useDigest":true}`` * - :spelling:ignore:`nodeinit.nodeSelector` - Node labels for nodeinit pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector - object diff --git a/Documentation/installation/images/rancher_add_cilium_repository.png b/Documentation/installation/images/rancher_add_cilium_repository.png deleted file mode 100644 index c08aff3c06da3..0000000000000 Binary files a/Documentation/installation/images/rancher_add_cilium_repository.png and /dev/null differ diff --git a/Documentation/installation/images/rancher_add_cluster.png b/Documentation/installation/images/rancher_add_cluster.png index 2214ac42b4257..7dcc6df42c3c1 100644 Binary files a/Documentation/installation/images/rancher_add_cluster.png and b/Documentation/installation/images/rancher_add_cluster.png differ diff --git a/Documentation/installation/images/rancher_add_nodes.png b/Documentation/installation/images/rancher_add_nodes.png deleted file mode 100644 index 418b16aac78f5..0000000000000 Binary files a/Documentation/installation/images/rancher_add_nodes.png and /dev/null differ diff --git a/Documentation/installation/images/rancher_add_repository_cluster.png b/Documentation/installation/images/rancher_add_repository_cluster.png deleted file mode 100644 index e1b20ce6b6098..0000000000000 Binary files 
a/Documentation/installation/images/rancher_add_repository_cluster.png and /dev/null differ diff --git a/Documentation/installation/images/rancher_additional_manifests.png b/Documentation/installation/images/rancher_additional_manifests.png new file mode 100644 index 0000000000000..7f4b4de6a5359 Binary files /dev/null and b/Documentation/installation/images/rancher_additional_manifests.png differ diff --git a/Documentation/installation/images/rancher_app_catalog_cilium.png b/Documentation/installation/images/rancher_app_catalog_cilium.png deleted file mode 100644 index 855a1f1355da1..0000000000000 Binary files a/Documentation/installation/images/rancher_app_catalog_cilium.png and /dev/null differ diff --git a/Documentation/installation/images/rancher_app_target_project.png b/Documentation/installation/images/rancher_app_target_project.png deleted file mode 100644 index 697a6fa4c483c..0000000000000 Binary files a/Documentation/installation/images/rancher_app_target_project.png and /dev/null differ diff --git a/Documentation/installation/images/rancher_apps_click_launch.png b/Documentation/installation/images/rancher_apps_click_launch.png deleted file mode 100644 index aeb9c4eacd72b..0000000000000 Binary files a/Documentation/installation/images/rancher_apps_click_launch.png and /dev/null differ diff --git a/Documentation/installation/images/rancher_cilium_repo.png b/Documentation/installation/images/rancher_cilium_repo.png new file mode 100644 index 0000000000000..7250ec1e7dce3 Binary files /dev/null and b/Documentation/installation/images/rancher_cilium_repo.png differ diff --git a/Documentation/installation/images/rancher_cluster_cilium_app.png b/Documentation/installation/images/rancher_cluster_cilium_app.png index ca9bcada0fc79..f864763b5fa26 100644 Binary files a/Documentation/installation/images/rancher_cluster_cilium_app.png and b/Documentation/installation/images/rancher_cluster_cilium_app.png differ diff --git a/Documentation/installation/images/rancher_cluster_cilium_app_upgrade.png b/Documentation/installation/images/rancher_cluster_cilium_app_upgrade.png index 66d3d791b4052..bbbaac5a1a04c 100644 Binary files a/Documentation/installation/images/rancher_cluster_cilium_app_upgrade.png and b/Documentation/installation/images/rancher_cluster_cilium_app_upgrade.png differ diff --git a/Documentation/installation/images/rancher_cluster_cilium_app_upgrade_version.png b/Documentation/installation/images/rancher_cluster_cilium_app_upgrade_version.png deleted file mode 100644 index 67753d3719a76..0000000000000 Binary files a/Documentation/installation/images/rancher_cluster_cilium_app_upgrade_version.png and /dev/null differ diff --git a/Documentation/installation/images/rancher_cluster_cilium_app_upgrade_versions.png b/Documentation/installation/images/rancher_cluster_cilium_app_upgrade_versions.png new file mode 100644 index 0000000000000..019be7bd664ed Binary files /dev/null and b/Documentation/installation/images/rancher_cluster_cilium_app_upgrade_versions.png differ diff --git a/Documentation/installation/images/rancher_cluster_created.png b/Documentation/installation/images/rancher_cluster_created.png new file mode 100644 index 0000000000000..02c8c1887cdfd Binary files /dev/null and b/Documentation/installation/images/rancher_cluster_created.png differ diff --git a/Documentation/installation/images/rancher_cluster_state_provisioning.png b/Documentation/installation/images/rancher_cluster_state_provisioning.png index 7a57e64fed71d..aac3ab5d95cfb 100644 Binary files 
a/Documentation/installation/images/rancher_cluster_state_provisioning.png and b/Documentation/installation/images/rancher_cluster_state_provisioning.png differ diff --git a/Documentation/installation/images/rancher_config_yaml.png b/Documentation/installation/images/rancher_config_yaml.png new file mode 100644 index 0000000000000..54d6159590398 Binary files /dev/null and b/Documentation/installation/images/rancher_config_yaml.png differ diff --git a/Documentation/installation/images/rancher_delete_network_plugin.png b/Documentation/installation/images/rancher_delete_network_plugin.png deleted file mode 100644 index 42974abde3182..0000000000000 Binary files a/Documentation/installation/images/rancher_delete_network_plugin.png and /dev/null differ diff --git a/Documentation/installation/images/rancher_edit_as_yaml.png b/Documentation/installation/images/rancher_edit_as_yaml.png deleted file mode 100644 index cbf51984b9f7c..0000000000000 Binary files a/Documentation/installation/images/rancher_edit_as_yaml.png and /dev/null differ diff --git a/Documentation/installation/images/rancher_my_cluster_active.png b/Documentation/installation/images/rancher_my_cluster_active.png deleted file mode 100644 index 9fec664b9a300..0000000000000 Binary files a/Documentation/installation/images/rancher_my_cluster_active.png and /dev/null differ diff --git a/Documentation/installation/images/rancher_network_plugin_none.png b/Documentation/installation/images/rancher_network_plugin_none.png deleted file mode 100644 index 1b9f762aef2e8..0000000000000 Binary files a/Documentation/installation/images/rancher_network_plugin_none.png and /dev/null differ diff --git a/Documentation/installation/images/rancher_node_not_ready.png b/Documentation/installation/images/rancher_node_not_ready.png deleted file mode 100644 index 95afc5ea3c92f..0000000000000 Binary files a/Documentation/installation/images/rancher_node_not_ready.png and /dev/null differ diff --git a/Documentation/installation/images/rancher_registration_command.png b/Documentation/installation/images/rancher_registration_command.png new file mode 100644 index 0000000000000..1f162d5ee567f Binary files /dev/null and b/Documentation/installation/images/rancher_registration_command.png differ diff --git a/Documentation/installation/images/rancher_repositories_list_success.png b/Documentation/installation/images/rancher_repositories_list_success.png deleted file mode 100644 index de12558b5127b..0000000000000 Binary files a/Documentation/installation/images/rancher_repositories_list_success.png and /dev/null differ diff --git a/Documentation/installation/images/rancher_select_cni.png b/Documentation/installation/images/rancher_select_cni.png new file mode 100644 index 0000000000000..ecde0618a0cd7 Binary files /dev/null and b/Documentation/installation/images/rancher_select_cni.png differ diff --git a/Documentation/installation/images/rancher_working_cluster.png b/Documentation/installation/images/rancher_working_cluster.png deleted file mode 100644 index e49e5d3f6f84a..0000000000000 Binary files a/Documentation/installation/images/rancher_working_cluster.png and /dev/null differ diff --git a/Documentation/installation/k8s-install-rancher-existing-nodes.rst b/Documentation/installation/k8s-install-rancher-existing-nodes.rst index 3437971238ffd..8c7155a68b822 100644 --- a/Documentation/installation/k8s-install-rancher-existing-nodes.rst +++ b/Documentation/installation/k8s-install-rancher-existing-nodes.rst @@ -25,7 +25,7 @@ However, as Rancher is using a custom with 
independent release cycles, Cilium power-users might want to use an out-of-band Cilium installation instead, based on the official `Cilium Helm chart `__, -on top of their Rancher-managed RKE1/RKE2 downstream clusters. +on top of their Rancher-managed RKE2 downstream clusters. This guide explains how to achieve this. .. note:: @@ -33,12 +33,10 @@ This guide explains how to achieve this. This guide only shows a step-by-step guide for Rancher-managed (**non-standalone**) **RKE2** clusters. - However, for a legacy RKE1 cluster, it's even easier. You also need to edit - the cluster YAML and change ``network.cni`` to ``none`` as described in the - :ref:`RKE 1 standalone guide`, but there's no need to copy over - a Control Plane node local KubeConfig manually. Luckily, Rancher allows access - to RKE1 clusters in ``Updating`` state, which are not ready yet. Hence, there's - no chicken-egg issue to resolve. +.. note:: + + This guide shows how to install Cilium on Rancher-managed Custom Clusters. + However, this method also applies to clusters created with providers such as VMware vSphere. Prerequisites ============= @@ -59,41 +57,78 @@ On the Cluster creation page select to create a new ``Custom`` cluster: .. image:: images/rancher_existing_nodes.png -When the ``Create Custom`` page opens, provide at least a name for the cluster. +When the ``Create Custom`` page opens, provide a name for the cluster. +In the same ``Basics`` section, expand the ``Container Network`` drop-down list and select ``none``. + +.. image:: images/rancher_select_cni.png + Go through the other configuration options and configure the ones that are relevant for your setup. -Next to the ``Cluster Options`` section click the box to ``Edit as YAML``. -The configuration for the cluster will open up in an editor in the window. +Add ``HelmChart`` manifests to install Cilium using the RKE2 built-in Helm Operator. +Go to the ``Additional Manifests`` section and paste the following YAML. Add relevant values for your Cilium installation. + +.. code-block:: yaml + + --- + apiVersion: catalog.cattle.io/v1 + kind: ClusterRepo + metadata: + name: cilium + spec: + url: https://helm.cilium.io + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChart + metadata: + name: cilium + namespace: kube-system + spec: + targetNamespace: kube-system + createNamespace: false + version: v1.18.0 + chart: cilium + repo: https://helm.cilium.io + bootstrap: true + valuesContent: |- + # paste your Cilium values here: + k8sServiceHost: 127.0.0.1 + k8sServicePort: 6443 + kubeProxyReplacement: true + +.. image:: images/rancher_additional_manifests.png + +.. note:: + + ``k8sServiceHost`` should be set to ``127.0.0.1`` and ``k8sServicePort`` to ``6443``. The Cilium Agent running on control plane nodes will use the local address to communicate with the Kubernetes API server process. + On Control Plane nodes, you can verify this by running: + + .. code-block:: shell-session -.. image:: images/rancher_edit_as_yaml.png + $ sudo ss -tulpn | grep 6443 + tcp LISTEN 0 4096 *:6443 *:* users:(("kube-apiserver",pid=124481,fd=3)) -Within the ``Cluster`` CustomResource (``provisioning.cattle.io/v1``), the relevant -parts to change are ``spec.rkeConfig.machineGlobalConfig.cni``, -``spec.rkeConfig.machineGlobalConfig.tls-san``, and optionally -``spec.rkeConfig.chartValues.rke2-calico`` and -``spec.rkeConfig.machineGlobalConfig.disable-kube-proxy``: -.. 
image:: images/rancher_delete_network_plugin.png + On worker nodes, the Cilium Agent will use the local address to communicate with the ``rke2`` process, which is listening on port ``6443``. The ``rke2`` process proxies requests to the Kubernetes API server running on the Control Plane node(s): -It's required to add a DNS record, pointing to the Control Plane node IP(s) -or an L4 load-balancer in front of them, under -``spec.rkeConfig.machineGlobalConfig.tls-san``, as that's required to resolve -a chicken-egg issue further down the line. + .. code-block:: shell-session + + $ sudo ss -tulpn | grep 6443 + tcp LISTEN 0 4096 127.0.0.1:6443 0.0.0.0:* users:(("rke2",pid=113574,fd=8)) -Ensure that ``spec.rkeConfig.machineGlobalConfig.cni`` is set to ``none`` and -``spec.rkeConfig.machineGlobalConfig.tls-san`` lists the mentioned DNS record: -.. image:: images/rancher_network_plugin_none.png +Click the ``Edit as YAML`` box at the bottom of the page. +The cluster configuration will open in an editor within the window. -Optionally, if ``spec.rkeConfig.chartValues.rke2-calico`` is not empty, remove the -full object as you won't deploy Rancher's default CNI. At the same time, change -``spec.rkeConfig.machineGlobalConfig.disable-kube-proxy`` to ``true`` in case you -want to run :ref:`Cilium without Kube-Proxy`. +Within the ``Cluster`` Custom Resource (``provisioning.cattle.io/v1``), +verify the ``rkeConfig`` section. It should contain the manifests that you added to the ``Additional Manifests`` section. -Make any additional changes to the configuration that are appropriate for your -environment. When you are ready, click ``Create`` and Rancher will create the -cluster. +If you would like to disable the default kube-proxy and your Cilium configuration enables :ref:`Kube-Proxy Replacement `, check the ``spec.rkeConfig.machineGlobalConfig`` section and set +``spec.rkeConfig.machineGlobalConfig.disable-kube-proxy`` to ``true``. + +.. image:: images/rancher_config_yaml.png + +When you are ready, click ``Create`` and Rancher will create the cluster. .. image:: images/rancher_cluster_state_provisioning.png @@ -105,116 +140,48 @@ Do not forget to select the correct node roles. Rancher comes with the default t deploy all three roles (``etcd``, ``Control Plane``, and ``Worker``), which is often not what you want for multi-node clusters. -.. image:: images/rancher_add_nodes.png +.. image:: images/rancher_registration_command.png A few seconds after you added at least a single node, you should see the new node(s) -in the ``Machines`` tab. The machine will be stuck in ``Reconciling`` state and -won't become ``Active``: - -.. image:: images/rancher_node_not_ready.png - -That's expected as there's no CNI running on this cluster yet. Unfortunately, this also -means critical pods like ``rke2-coredns-rke2-coredns-*`` and ``cattle-cluster-agent-*`` -are stuck in ``PENDING`` state. Hence, the downstream cluster is not yet able -to register itself on Rancher. - -As a next step, you need to resolve this chicken-egg issue by directly accessing -the downstream cluster's Kubernetes API, without going via Rancher. Rancher will not allow -access to this downstream cluster, as it's still in ``Updating`` state. That's why you -can't use the downstream cluster's KubeConfig provided by the Rancher management console/UI. - -Copy ``/etc/rancher/rke2/rke2.yaml`` from the first downstream cluster Control Plane -node to your jump/bastion host where you have ``helm`` installed and can access the -Cilium Helm charts. -.. 
code-block:: shell-session - - scp root@:/etc/rancher/rke2/rke2.yaml . - -Search and replace ``127.0.0.1`` (``clusters[0].cluster.server``) with the -already mentioned DNS record pointing to the Control Plane / L4 load-balancer IP(s). - -.. code-block:: yaml - - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0...S0K - server: https://127.0.0.1:6443 - name: default - contexts: {} - -Check if you can access the Kubernetes API: - -.. code-block:: shell-session - - export KUBECONFIG=$(pwd)/my-cluster-kubeconfig.yaml - kubectl get nodes - NAME STATUS ROLES AGE VERSION - rancher-demo-node NotReady control-plane,etcd,master 44m v1.27.8+rke2r1 - -If successful, you can now install Cilium via Helm CLI: - -.. parsed-literal:: - - helm install cilium |CHART_RELEASE| \\ - --namespace kube-system \\ - -f my-cluster-cilium-values.yaml +in the ``Machines`` tab. Cilium CNI will be installed during the cluster bootstrap process +by Helm Operator, which creates a Kubernetes Job that will install Cilium on the cluster. After a few minutes, you should see that the node changed to the ``Ready`` status: .. code-block:: shell-session - kubectl get nodes - NAME STATUS ROLES AGE VERSION - rancher-demo-node Ready control-plane,etcd,master 48m v1.27.8+rke2r1 + kubectl get nodes -A + NAME STATUS ROLES AGE VERSION + ip-10-1-1-167 Ready control-plane,etcd,master,worker 41m v1.32.6+rke2r1 + ip-10-1-1-231 Ready control-plane,etcd,master,worker 41m v1.32.6+rke2r1 + ip-10-1-1-50 Ready control-plane,etcd,master,worker 45m v1.32.6+rke2r1 Back in the Rancher UI, you should see that the cluster changed to the healthy ``Active`` status: -.. image:: images/rancher_my_cluster_active.png +.. image:: images/rancher_cluster_created.png -That's it. You can now normally work with this cluster as if you -installed the CNI the default Rancher way. Additional nodes can now be added -straightaway and the "local Control Plane RKE2 KubeConfig" workaround -is not required anymore. +That's it! You can now work with this cluster as if you had installed the CNI using the default Rancher method. +You can scale the cluster up or down, add or remove nodes, and so on. -Optional: Add Cilium to Rancher Registries -========================================== +Verify Cilium Installation +========================== -One small, optional convenience item would be to add the Cilium Helm repository -to Rancher so that, in the future, Cilium can easily be upgraded via Rancher UI. +After the installation, the Cilium repository and Helm release will be tracked by Rancher. You can manage the Cilium lifecycle +using the Rancher UI. To verify that Cilium is installed, check the Cilium app in the Rancher UI. -You have two options available: - -**Option 1**: Navigate to ``Cluster Management`` -> ``Advanced`` -> ``Repositories`` and -click the ``Create`` button: - -.. image:: images/rancher_add_repository.png - -**Option 2**: Alternatively, you can also just add the Cilium Helm repository -on a single cluster by navigating to ```` -> ``Apps`` -> ``Repositories``: - -.. image:: images/rancher_add_repository_cluster.png - -For either option, in the window that opens, add the official Cilium Helm chart -repository (``https://helm.cilium.io``) to the Rancher repository list: - -.. image:: images/rancher_add_cilium_repository.png - -Once added, you should see the Cilium repository in the repositories list: - -.. 
image:: images/rancher_repositories_list_success.png - -If you now head to ```` -> ``Apps`` -> ``Installed Apps``, you -should see the ``cilium`` app. Ensure ``All Namespaces`` or -``Project: System -> kube-system`` is selected at the top of the page. +Navigate to ```` -> ``Apps`` -> ``Installed Apps``. From the top drop-down menu, select +``All Namespaces`` or ``Project: System -> kube-system`` to see the Cilium app. .. image:: images/rancher_cluster_cilium_app.png -Since you added the Cilium repository, you will now see a small hint on this app entry +The Cilium Helm repository has been added to Rancher within the ``Additional Manifests`` section. + +.. image:: images/rancher_cilium_repo.png + +You will see a small hint on this app entry when there's a new Cilium version released. You can then upgrade directly via Rancher UI. .. image:: images/rancher_cluster_cilium_app_upgrade.png -.. image:: images/rancher_cluster_cilium_app_upgrade_version.png \ No newline at end of file +.. image:: images/rancher_cluster_cilium_app_upgrade_versions.png \ No newline at end of file diff --git a/Documentation/network/bgp-control-plane/bgp-control-plane-operation.rst b/Documentation/network/bgp-control-plane/bgp-control-plane-operation.rst index a272effb45af9..4535e98377301 100644 --- a/Documentation/network/bgp-control-plane/bgp-control-plane-operation.rst +++ b/Documentation/network/bgp-control-plane/bgp-control-plane-operation.rst @@ -466,11 +466,17 @@ Service Losing All Backends If all service backends are gone due to an outage or a configuration mistake, BGP Control Plane behaves differently depending on the Service's -``externalTrafficPolicy``. When the ``externalTrafficPolicy`` is set to -``Cluster``, the Service's VIP remains advertised from all nodes selected by the -``CiliumBGPPeeringPolicy`` or ``CiliumBGPClusterConfig``. When the ``externalTrafficPolicy`` -is set to ``Local``, the advertisement stops entirely because the Service's VIP is only advertised -from the node where the Service backends are running. +``externalTrafficPolicy`` and the ``--enable-no-service-endpoints-routable`` flag. + +When the ``externalTrafficPolicy`` is set to ``Cluster``, the +Service's VIP remains advertised from all nodes selected by the +``CiliumBGPPeeringPolicy`` or ``CiliumBGPClusterConfig`` **only** when +``--enable-no-service-endpoints-routable`` is true (the default). If the flag is +set to ``false``, the Service's VIP is withdrawn. + +When the ``externalTrafficPolicy`` is set to ``Local``, the advertisement stops +entirely because the Service's VIP is only advertised from the node where the +Service backends are running, regardless of the value of ``--enable-no-service-endpoints-routable``. Mitigation '''''''''' diff --git a/Documentation/network/clustermesh/clustermesh.rst b/Documentation/network/clustermesh/clustermesh.rst index 5706779a9acb5..050643001b587 100644 --- a/Documentation/network/clustermesh/clustermesh.rst +++ b/Documentation/network/clustermesh/clustermesh.rst @@ -231,7 +231,6 @@ then this will also wait for the LoadBalancer to be assigned an IP.
- 10.168.0.89:2379 ✅ Service "clustermesh-apiserver" of type "LoadBalancer" found 🔌 Cluster Connections: - 🔀 Global services: [ min:0 / avg:0.0 / max:0 ] Connect Clusters @@ -267,7 +266,6 @@ The output will look something like this: ✅ All 2 nodes are connected to all clusters [min:1 / avg:1.0 / max:1] 🔌 Cluster Connections: - cilium-cli-ci-multicluster-2-168: 2/2 configured, 2/2 connected - 🔀 Global services: [ min:6 / avg:6.0 / max:6 ] If this step does not complete successfully, proceed to the troubleshooting section. diff --git a/Documentation/network/clustermesh/mcsapi.rst b/Documentation/network/clustermesh/mcsapi.rst index 5d824fc5183b8..411ef6657a0dc 100644 --- a/Documentation/network/clustermesh/mcsapi.rst +++ b/Documentation/network/clustermesh/mcsapi.rst @@ -24,8 +24,8 @@ You first need to install the required MCS-API CRDs: .. code-block:: shell-session - kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/mcs-api/62ede9a032dcfbc41b3418d7360678cb83092498/config/crd/multicluster.x-k8s.io_serviceexports.yaml - kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/mcs-api/62ede9a032dcfbc41b3418d7360678cb83092498/config/crd/multicluster.x-k8s.io_serviceimports.yaml + kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/mcs-api/79efdd37ed2bf99b4ade250e4c0f4d62a4e970a2/config/crd/multicluster.x-k8s.io_serviceexports.yaml + kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/mcs-api/79efdd37ed2bf99b4ade250e4c0f4d62a4e970a2/config/crd/multicluster.x-k8s.io_serviceimports.yaml To install Cilium with MCS-API support, run: diff --git a/Documentation/network/kubernetes/kubeproxy-free.rst b/Documentation/network/kubernetes/kubeproxy-free.rst index 8808d673268cd..46297bc4df8e4 100644 --- a/Documentation/network/kubernetes/kubeproxy-free.rst +++ b/Documentation/network/kubernetes/kubeproxy-free.rst @@ -801,20 +801,6 @@ annotation mode with SNAT default would look as follows: When using annotation-based DSR mode (``bpf.lbModeAnnotation=true``), as in the previous example, you must explicitly specify the ``loadBalancer.dsrDispatch`` parameter to define how DSR packets are dispatched to backends. Valid options are ``opt``, ``ipip``, and ``geneve``. - For example, for environments where Geneve encapsulation is not suitable, you can use IPIP instead: - - .. parsed-literal:: - - helm install cilium |CHART_RELEASE| \\ - --namespace kube-system \\ - --set routingMode=native \\ - --set kubeProxyReplacement=true \\ - --set loadBalancer.mode=snat \\ - --set loadBalancer.dsrDispatch=ipip \\ - --set bpf.lbModeAnnotation=true \\ - --set k8sServiceHost=${API_SERVER_IP} \\ - --set k8sServicePort=${API_SERVER_PORT} - Annotation-based Load Balancing Algorithm Selection *************************************************** diff --git a/Documentation/network/servicemesh/ingress.rst b/Documentation/network/servicemesh/ingress.rst index 0711486f63a41..89596373d125f 100644 --- a/Documentation/network/servicemesh/ingress.rst +++ b/Documentation/network/servicemesh/ingress.rst @@ -183,11 +183,18 @@ Supported Ingress Annotations | ``enforce-ingress-https`` configuration | file setting (or ``ingressController.enforceHttps`` | in Helm). - | + | | Any host with TLS config will have redirects to | HTTPS configured for each match specified in the | Ingress. - unspecified + * - ``ingress.cilium.io/request-timeout`` + - | Request timeout in seconds for Ingress backend HTTP requests. 
+ | + | Note that if the annotation is present, it will override + | any value set by the ``ingress-default-request-timeout`` operator flag. + | If neither is set, the default is ``0`` (no limit). + - ``0`` Additionally, cloud-provider specific annotations for the LoadBalancer Service are supported. @@ -334,7 +341,7 @@ Cilium's Ingress features: http ingress-and-network-policy - path-types + path-types grpc tls-termination tls-default-certificate diff --git a/Documentation/observability/metrics.rst b/Documentation/observability/metrics.rst index c17b3e17f1e97..ac8b881a8ed6f 100644 --- a/Documentation/observability/metrics.rst +++ b/Documentation/observability/metrics.rst @@ -47,6 +47,12 @@ if you want to disable them, set Helm value ``operator.prometheus.enabled=false` --set prometheus.enabled=true \\ --set operator.prometheus.enabled=true +Cilium Metrics Scraping +----------------------- + +Prometheus Port Configuration +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + The ports can be configured via ``prometheus.port``, ``envoy.prometheus.port``, or ``operator.prometheus.port`` respectively. @@ -90,26 +96,45 @@ option is set in the ``scrape_configs`` section: replacement: ${1}:${2} target_label: __address__ +Prometheus Operator ServiceMonitor +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +You can automatically create a +`Prometheus Operator `__ +``ServiceMonitor`` by setting ``prometheus.serviceMonitor.enabled=true``, +``envoy.prometheus.serviceMonitor.enabled=true``, or +``operator.prometheus.serviceMonitor.enabled=true``, respectively. + .. _hubble_metrics: Hubble Metrics ============== -While Cilium metrics allow you to monitor the state Cilium itself, +While Cilium metrics allow you to monitor the state of Cilium itself, Hubble metrics on the other hand allow you to monitor the network behavior of your Cilium-managed Kubernetes pods with respect to connectivity and security. -Installation ------------- - -To deploy Cilium with Hubble metrics enabled, you need to enable Hubble with -``hubble.enabled=true`` and provide a set of Hubble metrics you want to -enable via ``hubble.metrics.enabled``. - Some of the metrics can also be configured with additional options. See the :ref:`Hubble exported metrics` section for the full list of available metrics and their options. +Static or dynamic exporter +-------------------------- + +Hubble metrics can be configured with either a static or a dynamic exporter. + +The dynamic metrics exporter allows you to change defined metrics as needed +without requiring an agent restart. + + +Installation with a static metrics exporter +------------------------------------------- + +To deploy Cilium with the static Hubble metrics exporter enabled, you need to enable +Hubble with ``hubble.enabled=true`` and provide a set of Hubble metrics you want to +enable via ``hubble.metrics.enabled``. + + .. parsed-literal:: helm install cilium |CHART_RELEASE| \\ @@ -120,6 +145,120 @@ section for the full list of available metrics and their options. --set hubble.metrics.enableOpenMetrics=true \\ --set hubble.metrics.enabled="{dns,drop,tcp,flow,port-distribution,icmp,httpV2:exemplars=true;labelsContext=source_ip\\,source_namespace\\,source_workload\\,destination_ip\\,destination_namespace\\,destination_workload\\,traffic_direction}" + +Installation with a dynamic metrics exporter +-------------------------------------------- + +To deploy Cilium with Hubble dynamic metrics enabled, you need to enable Hubble +with ``hubble.enabled=true`` and ``hubble.metrics.dynamic.enabled=true``. 
+ +In this example, a ``ConfigMap`` with a set of metrics will be applied before +enabling the exporter, but the desired set of metrics (together with the +``ConfigMap``) can be created during installation. + +See the :ref:`helm_reference` (keys with ``hubble.metrics.dynamic.*``) + + + +.. code-block:: yaml + + apiVersion: v1 + kind: ConfigMap + metadata: + name: cilium-dynamic-metrics-config + namespace: kube-system + data: + dynamic-metrics.yaml: | + metrics: + - name: dns + - contextOptions: + - name: sourceContext + values: + - workload-name + - reserved-identity + - name: destinationContext + values: + - workload-name + - reserved-identity + name: flow + - name: drop + - name: tcp + - contextOptions: + - name: sourceContext + values: + - workload-name + - reserved-identity + name: icmp + - contextOptions: + - name: exemplars + values: + - true + - name: labelsContext + values: + - source_ip + - source_namespace + - source_workload + - destination_ip + - destination_namespace + - destination_workload + - traffic_direction + - name: sourceContext + values: + - workload-name + - reserved-identity + - name: destinationContext + values: + - workload-name + - reserved-identity + name: httpV2 + - contextOptions: + - name: sourceContext + values: + - app + - workload-name + - pod + - reserved-identity + - name: destinationContext + values: + - app + - workload-name + - pod + - dns + - reserved-identity + - name: labelsContext + values: + - source_namespace + - destination_namespace + excludeFilters: + - destination_pod: + - default/ + name: policy + +Deploy the :term:`ConfigMap`: + +.. code-block:: shell-session + + kubectl apply -f dynamic-metrics.yaml + +.. parsed-literal:: + + helm install cilium |CHART_RELEASE| \\ + --namespace kube-system \\ + --set prometheus.enabled=true \\ + --set operator.prometheus.enabled=true \\ + --set hubble.enabled=true \\ + --set hubble.metrics.enableOpenMetrics=true \\ + --set hubble.metrics.enabled=[] \\ + --set hubble.metrics.dynamic.enabled=true \\ + --set hubble.metrics.dynamic.config.configMapName=cilium-dynamic-metrics-config \\ + --set hubble.metrics.dynamic.config.createConfigMap=false + +Hubble Metrics Scraping +----------------------- + +Prometheus Port Configuration +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + The port of the Hubble metrics can be configured with the ``hubble.metrics.port`` Helm value. @@ -160,6 +299,13 @@ have it scrape all Hubble metrics from the endpoints automatically: regex: (.+)(?::\d+);(\d+) replacement: $1:$2 +Prometheus Operator ServiceMonitor +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +You can automatically create a +`Prometheus Operator `__ +``ServiceMonitor`` by setting ``hubble.metrics.serviceMonitor.enabled=true``. + .. 
_hubble_open_metrics: OpenMetrics @@ -371,10 +517,11 @@ Clustermesh =============================================== ============================================================ ========== ================================================================= Name Labels Default Description =============================================== ============================================================ ========== ================================================================= -``clustermesh_global_services`` ``source_cluster``, ``source_node_name`` Enabled The total number of global services in the cluster mesh +``clustermesh_remote_cluster_services`` ``source_cluster``, ``source_node_name``, ``target_cluster`` Enabled The total number of services per remote cluster +``clustermesh_remote_cluster_endpoints`` ``source_cluster``, ``source_node_name``, ``target_cluster`` Enabled The total number of endpoints per remote cluster +``clustermesh_remote_cluster_nodes`` ``source_cluster``, ``source_node_name``, ``target_cluster`` Enabled The total number of nodes per remote cluster ``clustermesh_remote_clusters`` ``source_cluster``, ``source_node_name`` Enabled The total number of remote clusters meshed with the local cluster ``clustermesh_remote_cluster_failures`` ``source_cluster``, ``source_node_name``, ``target_cluster`` Enabled The total number of failures related to the remote cluster -``clustermesh_remote_cluster_nodes`` ``source_cluster``, ``source_node_name``, ``target_cluster`` Enabled The total number of nodes in the remote cluster ``clustermesh_remote_cluster_last_failure_ts`` ``source_cluster``, ``source_node_name``, ``target_cluster`` Enabled The timestamp of the last failure of the remote cluster ``clustermesh_remote_cluster_readiness_status`` ``source_cluster``, ``source_node_name``, ``target_cluster`` Enabled The readiness status of the remote cluster =============================================== ============================================================ ========== ================================================================= @@ -865,8 +1012,19 @@ Name Labels ``serviceexport_info`` ``serviceexport``, ``namespace`` Enabled Information about ServiceExport in the local cluster ``serviceexport_status_condition`` ``serviceexport``, ``namespace``, ``condition``, ``status`` Enabled Status Condition of ServiceExport in the local cluster ``serviceimport_info`` ``serviceimport``, ``namespace`` Enabled Information about ServiceImport in the local cluster +``serviceimport_status_condition`` ``serviceimport``, ``namespace``, ``condition``, ``status`` Enabled Status Condition of ServiceImport in the local cluster ==================================== ============================================================ ========== =========================================================== +Clustermesh +~~~~~~~~~~~ + +============================================== ======================================= ========== ================================================================== +Name Labels Default Description +============================================== ======================================= ========== ================================================================== +``clustermesh_remote_cluster_services`` ``source_cluster``, ``target_cluster`` Enabled The total number of services per remote cluster +``clustermesh_remote_cluster_service_exports`` ``source_cluster``, ``target_cluster`` Enabled The total number of MCS-API service exports per remote cluster 
+============================================== ======================================= ========== ================================================================== + Hubble ------ diff --git a/Documentation/operations/performance/tuning.rst b/Documentation/operations/performance/tuning.rst index c8d728eb42b49..91ec45c773543 100644 --- a/Documentation/operations/performance/tuning.rst +++ b/Documentation/operations/performance/tuning.rst @@ -365,6 +365,20 @@ To enable the iptables connection-tracking bypass: --set installNoConntrackIptablesRules=true \\ --set kubeProxyReplacement=true +If a Pod has the ``hostNetwork`` flag enabled, the ports for which connection tracking should be skipped +must be explicitly listed using the ``network.cilium.io/no-track-host-ports`` annotation: + +.. code-block:: yaml + + apiVersion: v1 + kind: Pod + metadata: + annotations: + network.cilium.io/no-track-host-ports: "999/tcp,8123/tcp" + +.. note:: + Only UDP and TCP transport protocols are supported with the network.cilium.io/no-track-host-ports annotation at the time of writing. + Hubble ====== diff --git a/Documentation/operations/troubleshooting_clustermesh.rst b/Documentation/operations/troubleshooting_clustermesh.rst index 61f9f14d7e919..7d5092257e51a 100644 --- a/Documentation/operations/troubleshooting_clustermesh.rst +++ b/Documentation/operations/troubleshooting_clustermesh.rst @@ -73,7 +73,7 @@ you may perform the following steps to troubleshoot ClusterMesh issues. #. Validate that ClusterMesh is healthy running ``cilium-dbg status --all-clusters`` inside each Cilium agent:: - ClusterMesh: 1/1 remote clusters ready, 10 global-services + ClusterMesh: 1/1 remote clusters ready k8s-c2: ready, 3 nodes, 25 endpoints, 8 identities, 10 services, 0 MCS-API service exports, 0 reconnections (last: never) └ etcd: 1/1 connected, leases=0, lock lease-ID=7c028201b53de662, has-quorum=true: https://k8s-c2.mesh.cilium.io:2379 - 3.5.4 (Leader) └ remote configuration: expected=true, retrieved=true, cluster-id=3, kvstoremesh=false, sync-canaries=true, service-exports=disabled diff --git a/Documentation/operations/upgrade.rst b/Documentation/operations/upgrade.rst index fded21e97cacc..fe3eb892cce58 100644 --- a/Documentation/operations/upgrade.rst +++ b/Documentation/operations/upgrade.rst @@ -305,7 +305,8 @@ communicating via the proxy must reconnect to re-establish connections. to update your network policies. * Kafka Network Policy support is deprecated and will be removed in Cilium v1.20. * Hubble field mask support was stabilized. In the Observer gRPC API, ``GetFlowsRequest.Experimental.field_mask`` was removed in favor of ``GetFlowsRequest.field_mask``. In the Hubble CLI, the ``--experimental-field-mask`` has been renamed to ``--field-mask`` and ``--experimental-use-default-field-mask`` renamed to ``-use-default-field-mask`` (now ``true`` by default). - +* Cilium-agent ClusterMesh status will no longer report the global services count. When using the CLI + with a version lower than 1.19, the global services count will be reported as 0. * ``enable-remote-node-masquerade`` config option is introduced. To masquerade traffic to remote nodes in BPF masquerading mode, use the option ``enable-remote-node-masquerade: "true"``. @@ -315,6 +316,7 @@ communicating via the proxy must reconnect to re-establish connections. This flag currently masquerades traffic to node ``InternalIP`` addresses. This may change in future. See :gh-issue:`35823` and :gh-issue:`17177` for further discussion on this topic. 
+* MCS-API CRDs need to be updated; see the MCS-API :ref:`clustermesh_mcsapi_prereqs` for updated CRD links. Removed Options ~~~~~~~~~~~~~~~ @@ -329,7 +331,9 @@ Removed Options Deprecated Options ~~~~~~~~~~~~~~~~~~ - +* The ``--enable-ipsec-encrypted-overlay`` flag has no effect and will be removed in Cilium 1.20. Starting from + Cilium 1.18, IPsec encryption is always applied after overlay encapsulation, and therefore this special opt-in + flag is no longer needed. Helm Options ~~~~~~~~~~~~ @@ -355,6 +359,8 @@ Bugtool Options Added Metrics ~~~~~~~~~~~~~ +* ``cilium_agent_clustermesh_remote_cluster_endpoints`` was added and reports + the total number of endpoints per remote cluster in a ClusterMesh environment. Removed Metrics ~~~~~~~~~~~~~~~ @@ -380,6 +386,15 @@ As well, any remaining Operator k8s workqueue metrics that use the label ``queue * ``k8s_client_rate_limiter_duration_seconds`` no longer has labels ``path`` and ``method``. +The following metrics: +* ``cilium_agent_clustermesh_global_services`` +* ``cilium_operator_clustermesh_global_services`` +* ``cilium_operator_clustermesh_global_service_exports`` +now report a per-cluster metric instead of a "global" count and were renamed, respectively, to: +* ``cilium_agent_clustermesh_remote_cluster_services`` +* ``cilium_operator_clustermesh_remote_cluster_services`` +* ``cilium_operator_clustermesh_remote_cluster_service_exports`` + Deprecated Metrics ~~~~~~~~~~~~~~~~~~ diff --git a/Documentation/reference-guides/bpf/toolchain.rst b/Documentation/reference-guides/bpf/toolchain.rst index 9cfd117aab526..bd199b7c6cdd1 100644 --- a/Documentation/reference-guides/bpf/toolchain.rst +++ b/Documentation/reference-guides/bpf/toolchain.rst @@ -151,24 +151,22 @@ Compiling iproute2 `````````````````` Similar to the ``net`` (fixes only) and ``net-next`` (new features) kernel trees, -the iproute2 git tree has two branches, namely ``master`` and ``net-next``. The -``master`` branch is based on the ``net`` tree and the ``net-next`` branch is -based against the ``net-next`` kernel tree. This is necessary, so that changes -in header files can be synchronized in the iproute2 tree. +iproute2 is split into two separate trees, namely ``iproute2`` and ``iproute2-next``. +The ``iproute2`` repository is based on the ``net`` tree and the ``iproute2-next`` +repository is based on the ``net-next`` kernel tree. This is necessary, +so that changes in header files can be synchronized in the iproute2 tree. -In order to clone the iproute2 ``master`` branch, the following command can -be used: +To clone the stable ``iproute2`` repository: .. code-block:: shell-session $ git clone https://git.kernel.org/pub/scm/network/iproute2/iproute2.git -Similarly, to clone into mentioned ``net-next`` branch of iproute2, run the -following: +Similarly, to clone the development ``iproute2-next`` tree: .. code-block:: shell-session - $ git clone -b net-next https://git.kernel.org/pub/scm/network/iproute2/iproute2.git + $ git clone https://git.kernel.org/pub/scm/network/iproute2/iproute2-next.git After that, proceed with the build and installation: diff --git a/Documentation/security/policy/language.rst b/Documentation/security/policy/language.rst index 16eacd8814b51..4f33aacf13923 100644 --- a/Documentation/security/policy/language.rst +++ b/Documentation/security/policy/language.rst @@ -1339,3 +1339,6 @@ Host Policies known issues services for the native device (such as NodePort), hosts will enforce Host Policies on service addresses rather than the service endpoints. 
For details, refer to :gh-issue:`12545`. + +- Host Firewall and thus Host Policies do not work together with IPsec. + For details, refer to :gh-issue:`41854`. \ No newline at end of file diff --git a/Documentation/security/threat-model.rst b/Documentation/security/threat-model.rst index 7a134ba7e6a82..72111d154ea03 100644 --- a/Documentation/security/threat-model.rst +++ b/Documentation/security/threat-model.rst @@ -187,16 +187,15 @@ Recommended Controls Limited-privilege Host Attacker ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -In this scenario, the attacker is someone with the ability to run -arbitrary code with direct access to the host PID or network namespace -(or both), but without "root" privileges that would allow them to -disable Cilium components or undermine the eBPF and other kernel state -Cilium relies on. +In this scenario, the attacker is someone with the ability to run arbitrary +code with direct access to the host PID or network namespace (or both), but +without "root"-equivalent privileges that would allow them to disable Cilium +components or undermine the eBPF and other kernel state Cilium relies on. This level of access could exist for a variety of reasons, including: -- Pods or other containers running in the host PID or network - namespace, but not with "root" privileges. This includes +- Pods or other containers running in the host PID or network namespace, + but without "root" privileges or capabilities. This includes ``hostNetwork: true`` and ``hostPID: true`` containers. - Non-"root" SSH or other console access to a node. - A containerized workload that has "escaped" the container namespace @@ -303,7 +302,8 @@ the local host. This access could exist for several reasons, including: - A containerized workload that has escaped the container namespace as a privileged user. - Pods running with ``privileged: true`` or other significant - capabilities like ``CAP_SYS_ADMIN`` or ``CAP_BPF``. + capabilities like ``CAP_BPF``, ``CAP_NET_ADMIN``, ``CAP_NET_RAW``, or + ``CAP_SYS_ADMIN``. .. 
image:: images/cilium_threat_model_root.png diff --git a/Makefile b/Makefile index 77a949132fb55..df1466e70c0c3 100644 --- a/Makefile +++ b/Makefile @@ -12,7 +12,7 @@ debug: all include Makefile.defs -SUBDIRS_CILIUM_CONTAINER := cilium-dbg daemon cilium-health bugtool tools/mount tools/sysctlfix plugins/cilium-cni +SUBDIRS_CILIUM_CONTAINER := cilium-dbg daemon cilium-health bugtool hubble tools/mount tools/sysctlfix plugins/cilium-cni SUBDIR_OPERATOR_CONTAINER := operator SUBDIR_RELAY_CONTAINER := hubble-relay SUBDIR_CLUSTERMESH_APISERVER_CONTAINER := clustermesh-apiserver diff --git a/Makefile.defs b/Makefile.defs index 851af483d135c..f89465775be95 100644 --- a/Makefile.defs +++ b/Makefile.defs @@ -184,21 +184,27 @@ CGO_ENABLED ?= 0 GOEXPERIMENT ?= # Support CGO cross-compiling for amd64 and arm64 targets -CGO_CC = +GO_BUILD_ENV = CGO_ENABLED=$(CGO_ENABLED) CROSS_ARCH = ifneq ($(GOARCH),$(NATIVE_ARCH)) CROSS_ARCH = $(GOARCH) endif ifeq ($(CROSS_ARCH),arm64) - CGO_CC = CC=aarch64-linux-gnu-gcc + GO_BUILD_ENV += CC=aarch64-linux-gnu-gcc else ifeq ($(CROSS_ARCH),amd64) - CGO_CC = CC=x86_64-linux-gnu-gcc + GO_BUILD_ENV += CC=x86_64-linux-gnu-gcc +endif + +ifneq ($(GOARCH),) + GO_BUILD_ENV += GOARCH=$(GOARCH) +endif + +ifneq ($(GOOS),) + GO_BUILD_ENV += GOOS=$(GOOS) endif ifneq ($(GOEXPERIMENT),) - GO_BUILD = GOEXPERIMENT=$(GOEXPERIMENT) CGO_ENABLED=$(CGO_ENABLED) $(CGO_CC) $(GO) build -else - GO_BUILD = CGO_ENABLED=$(CGO_ENABLED) $(CGO_CC) $(GO) build + GO_BUILD_ENV += GOEXPERIMENT=$(GOEXPERIMENT) endif ifneq ($(RACE),) @@ -231,7 +237,7 @@ ifeq ($(NOOPT),1) GO_BUILD_FLAGS += -gcflags="all=-N -l" endif -GO_BUILD += $(GO_BUILD_FLAGS) +GO_BUILD = $(GO_BUILD_ENV) $(GO) build $(GO_BUILD_FLAGS) GO_TEST = CGO_ENABLED=0 $(GO) test $(GO_TEST_FLAGS) GO_CLEAN = $(GO) clean $(GO_CLEAN_FLAGS) diff --git a/README.rst b/README.rst index def128df30948..2e320311b9541 100644 --- a/README.rst +++ b/README.rst @@ -56,11 +56,11 @@ Listed below are the actively maintained release branches along with their lates patch release, corresponding image pull tags and their release notes: +---------------------------------------------------------+------------+------------------------------------+----------------------------------------------------------------------------+ -| `v1.18 `__ | 2025-08-15 | ``quay.io/cilium/cilium:v1.18.1`` | `Release Notes `__ | +| `v1.18 `__ | 2025-09-16 | ``quay.io/cilium/cilium:v1.18.2`` | `Release Notes `__ | +---------------------------------------------------------+------------+------------------------------------+----------------------------------------------------------------------------+ -| `v1.17 `__ | 2025-08-15 | ``quay.io/cilium/cilium:v1.17.7`` | `Release Notes `__ | +| `v1.17 `__ | 2025-09-22 | ``quay.io/cilium/cilium:v1.17.8`` | `Release Notes `__ | +---------------------------------------------------------+------------+------------------------------------+----------------------------------------------------------------------------+ -| `v1.16 `__ | 2025-08-15 | ``quay.io/cilium/cilium:v1.16.13`` | `Release Notes `__ | +| `v1.16 `__ | 2025-09-22 | ``quay.io/cilium/cilium:v1.16.15`` | `Release Notes `__ | +---------------------------------------------------------+------------+------------------------------------+----------------------------------------------------------------------------+ Architectures diff --git a/USERS.md b/USERS.md index f98d818327479..452d372d8cb60 100644 --- a/USERS.md +++ b/USERS.md @@ -331,6 +331,11 @@ Users (Alphabetically) U: Service load-balancing, 
Encryption, CNI, NetworkPolicies Q: @kevholditch-f3, samo-f3, ewilde-form3 + * N: FPT Telecom + D: FTEL uses Cilium as their CNI plugin to handle the massive CPE Management traffic to the backends + U: CNI, CiliumclusterWideNetworkPolicy, CiliumNetworkPolicy, Kube-Proxy Replacement, Hubble, Direct Routing, Egress Gateway, Service Load Balancing, L2 Announcement, BGP Advertisement + Q: @minhng99 + * N: FRSCA - Factory for Repeatable Secure Creation of Artifacts D: FRSCA is utilizing tetragon integrated with Tekton to create runtime attestation to attest artifact and builder attributes U: Runtime observability diff --git a/api/v1/flow/README.md b/api/v1/flow/README.md index 8c665dc4c2d30..1adb360e6f81e 100644 --- a/api/v1/flow/README.md +++ b/api/v1/flow/README.md @@ -24,6 +24,7 @@ - [ICMPv6](#flow-ICMPv6) - [IP](#flow-IP) - [IPCacheNotification](#flow-IPCacheNotification) + - [IPTraceID](#flow-IPTraceID) - [Kafka](#flow-Kafka) - [Layer4](#flow-Layer4) - [Layer7](#flow-Layer7) @@ -312,6 +313,7 @@ EventTypeFilter is a filter describing a particular event type. | trace_observation_point | [TraceObservationPoint](#flow-TraceObservationPoint) | | Only applicable to cilium trace notifications, blank for other types. | | trace_reason | [TraceReason](#flow-TraceReason) | | Cilium datapath trace reason info. | | file | [FileInfo](#flow-FileInfo) | | Cilium datapath filename and line number. Currently only applicable when Verdict = DROPPED. | +| ip_trace_id | [IPTraceID](#flow-IPTraceID) | | IPTraceID relates to the trace ID in the IP options of a packet. | | drop_reason_desc | [DropReason](#flow-DropReason) | | only applicable to Verdict = DROPPED. | | is_reply | [google.protobuf.BoolValue](#google-protobuf-BoolValue) | | is_reply indicates that this was a packet (L4) or message (L7) in the reply direction. May be absent (in which case it is unknown whether it is a reply or not). | | debug_capture_point | [DebugCapturePoint](#flow-DebugCapturePoint) | | Only applicable to cilium debug capture events, blank for other types | @@ -381,6 +383,7 @@ multiple fields are set, then all fields must match for the filter to match. | node_labels | [string](#string) | repeated | node_labels filters on a list of node label selectors. Selectors support the full Kubernetes label selector syntax. | | ip_version | [IPVersion](#flow-IPVersion) | repeated | filter based on IP version (ipv4 or ipv6) | | trace_id | [string](#string) | repeated | trace_id filters flows by trace ID | +| ip_trace_id | [uint64](#uint64) | repeated | ip_trace_id filters flows by IPTraceID | | experimental | [FlowFilter.Experimental](#flow-FlowFilter-Experimental) | | experimental contains filters that are not stable yet. Support for experimental features is always optional and subject to change. | @@ -512,6 +515,22 @@ L7 information for HTTP flows. It corresponds to Cilium's [accesslog.LogReco + + +### IPTraceID + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| trace_id | [uint64](#uint64) | | | +| ip_option_type | [uint32](#uint32) | | | + + + + + + ### Kafka @@ -581,6 +600,8 @@ that happened before the events were captured by Hubble. | source | [LostEventSource](#flow-LostEventSource) | | source is the location where events got lost. | | num_events_lost | [uint64](#uint64) | | num_events_lost is the number of events that haven been lost at source. 
| | cpu | [google.protobuf.Int32Value](#google-protobuf-Int32Value) | | cpu on which the event was lost if the source of lost events is PERF_EVENT_RING_BUFFER. | +| first | [google.protobuf.Timestamp](#google-protobuf-Timestamp) | | first is the timestamp of the first event that was lost. | +| last | [google.protobuf.Timestamp](#google-protobuf-Timestamp) | | last is the timestamp of the last event that was lost. | diff --git a/api/v1/flow/flow.pb.go b/api/v1/flow/flow.pb.go index 01608c899f096..e441a499480e3 100644 --- a/api/v1/flow/flow.pb.go +++ b/api/v1/flow/flow.pb.go @@ -4,7 +4,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.36.8 -// protoc v6.32.0 +// protoc v6.32.1 // source: flow/flow.proto package flow @@ -1499,6 +1499,8 @@ type Flow struct { // Cilium datapath filename and line number. Currently only applicable when // Verdict = DROPPED. File *FileInfo `protobuf:"bytes,38,opt,name=file,proto3" json:"file,omitempty"` + // IPTraceID relates to the trace ID in the IP options of a packet. + IpTraceId *IPTraceID `protobuf:"bytes,40,opt,name=ip_trace_id,json=ipTraceId,proto3" json:"ip_trace_id,omitempty"` // only applicable to Verdict = DROPPED. DropReasonDesc DropReason `protobuf:"varint,25,opt,name=drop_reason_desc,json=dropReasonDesc,proto3,enum=flow.DropReason" json:"drop_reason_desc,omitempty"` // is_reply indicates that this was a packet (L4) or message (L7) in the @@ -1764,6 +1766,13 @@ func (x *Flow) GetFile() *FileInfo { return nil } +func (x *Flow) GetIpTraceId() *IPTraceID { + if x != nil { + return x.IpTraceId + } + return nil +} + func (x *Flow) GetDropReasonDesc() DropReason { if x != nil { return x.DropReasonDesc @@ -3286,6 +3295,8 @@ type FlowFilter struct { IpVersion []IPVersion `protobuf:"varint,25,rep,packed,name=ip_version,json=ipVersion,proto3,enum=flow.IPVersion" json:"ip_version,omitempty"` // trace_id filters flows by trace ID TraceId []string `protobuf:"bytes,28,rep,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + // ip_trace_id filters flows by IPTraceID + IpTraceId []uint64 `protobuf:"varint,39,rep,packed,name=ip_trace_id,json=ipTraceId,proto3" json:"ip_trace_id,omitempty"` // experimental contains filters that are not stable yet. Support for // experimental features is always optional and subject to change. 
Experimental *FlowFilter_Experimental `protobuf:"bytes,999,opt,name=experimental,proto3" json:"experimental,omitempty"` @@ -3589,6 +3600,13 @@ func (x *FlowFilter) GetTraceId() []string { return nil } +func (x *FlowFilter) GetIpTraceId() []uint64 { + if x != nil { + return x.IpTraceId + } + return nil +} + func (x *FlowFilter) GetExperimental() *FlowFilter_Experimental { if x != nil { return x.Experimental @@ -3970,6 +3988,58 @@ func (x *Service) GetNamespace() string { return "" } +type IPTraceID struct { + state protoimpl.MessageState `protogen:"open.v1"` + TraceId uint64 `protobuf:"varint,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + IpOptionType uint32 `protobuf:"varint,2,opt,name=ip_option_type,json=ipOptionType,proto3" json:"ip_option_type,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *IPTraceID) Reset() { + *x = IPTraceID{} + mi := &file_flow_flow_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *IPTraceID) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IPTraceID) ProtoMessage() {} + +func (x *IPTraceID) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[26] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IPTraceID.ProtoReflect.Descriptor instead. +func (*IPTraceID) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{26} +} + +func (x *IPTraceID) GetTraceId() uint64 { + if x != nil { + return x.TraceId + } + return 0 +} + +func (x *IPTraceID) GetIpOptionType() uint32 { + if x != nil { + return x.IpOptionType + } + return 0 +} + // LostEvent is a message which notifies consumers about a loss of events // that happened before the events were captured by Hubble. type LostEvent struct { @@ -3980,14 +4050,18 @@ type LostEvent struct { NumEventsLost uint64 `protobuf:"varint,2,opt,name=num_events_lost,json=numEventsLost,proto3" json:"num_events_lost,omitempty"` // cpu on which the event was lost if the source of lost events is // PERF_EVENT_RING_BUFFER. - Cpu *wrapperspb.Int32Value `protobuf:"bytes,3,opt,name=cpu,proto3" json:"cpu,omitempty"` + Cpu *wrapperspb.Int32Value `protobuf:"bytes,3,opt,name=cpu,proto3" json:"cpu,omitempty"` + // first is the timestamp of the first event that was lost. + First *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=first,proto3" json:"first,omitempty"` + // last is the timestamp of the last event that was lost. + Last *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=last,proto3" json:"last,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *LostEvent) Reset() { *x = LostEvent{} - mi := &file_flow_flow_proto_msgTypes[26] + mi := &file_flow_flow_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3999,7 +4073,7 @@ func (x *LostEvent) String() string { func (*LostEvent) ProtoMessage() {} func (x *LostEvent) ProtoReflect() protoreflect.Message { - mi := &file_flow_flow_proto_msgTypes[26] + mi := &file_flow_flow_proto_msgTypes[27] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4012,7 +4086,7 @@ func (x *LostEvent) ProtoReflect() protoreflect.Message { // Deprecated: Use LostEvent.ProtoReflect.Descriptor instead. 
func (*LostEvent) Descriptor() ([]byte, []int) { - return file_flow_flow_proto_rawDescGZIP(), []int{26} + return file_flow_flow_proto_rawDescGZIP(), []int{27} } func (x *LostEvent) GetSource() LostEventSource { @@ -4036,6 +4110,20 @@ func (x *LostEvent) GetCpu() *wrapperspb.Int32Value { return nil } +func (x *LostEvent) GetFirst() *timestamppb.Timestamp { + if x != nil { + return x.First + } + return nil +} + +func (x *LostEvent) GetLast() *timestamppb.Timestamp { + if x != nil { + return x.Last + } + return nil +} + type AgentEvent struct { state protoimpl.MessageState `protogen:"open.v1"` Type AgentEventType `protobuf:"varint,1,opt,name=type,proto3,enum=flow.AgentEventType" json:"type,omitempty"` @@ -4056,7 +4144,7 @@ type AgentEvent struct { func (x *AgentEvent) Reset() { *x = AgentEvent{} - mi := &file_flow_flow_proto_msgTypes[27] + mi := &file_flow_flow_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4068,7 +4156,7 @@ func (x *AgentEvent) String() string { func (*AgentEvent) ProtoMessage() {} func (x *AgentEvent) ProtoReflect() protoreflect.Message { - mi := &file_flow_flow_proto_msgTypes[27] + mi := &file_flow_flow_proto_msgTypes[28] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4081,7 +4169,7 @@ func (x *AgentEvent) ProtoReflect() protoreflect.Message { // Deprecated: Use AgentEvent.ProtoReflect.Descriptor instead. func (*AgentEvent) Descriptor() ([]byte, []int) { - return file_flow_flow_proto_rawDescGZIP(), []int{27} + return file_flow_flow_proto_rawDescGZIP(), []int{28} } func (x *AgentEvent) GetType() AgentEventType { @@ -4240,7 +4328,7 @@ type AgentEventUnknown struct { func (x *AgentEventUnknown) Reset() { *x = AgentEventUnknown{} - mi := &file_flow_flow_proto_msgTypes[28] + mi := &file_flow_flow_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4252,7 +4340,7 @@ func (x *AgentEventUnknown) String() string { func (*AgentEventUnknown) ProtoMessage() {} func (x *AgentEventUnknown) ProtoReflect() protoreflect.Message { - mi := &file_flow_flow_proto_msgTypes[28] + mi := &file_flow_flow_proto_msgTypes[29] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4265,7 +4353,7 @@ func (x *AgentEventUnknown) ProtoReflect() protoreflect.Message { // Deprecated: Use AgentEventUnknown.ProtoReflect.Descriptor instead. func (*AgentEventUnknown) Descriptor() ([]byte, []int) { - return file_flow_flow_proto_rawDescGZIP(), []int{28} + return file_flow_flow_proto_rawDescGZIP(), []int{29} } func (x *AgentEventUnknown) GetType() string { @@ -4291,7 +4379,7 @@ type TimeNotification struct { func (x *TimeNotification) Reset() { *x = TimeNotification{} - mi := &file_flow_flow_proto_msgTypes[29] + mi := &file_flow_flow_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4303,7 +4391,7 @@ func (x *TimeNotification) String() string { func (*TimeNotification) ProtoMessage() {} func (x *TimeNotification) ProtoReflect() protoreflect.Message { - mi := &file_flow_flow_proto_msgTypes[29] + mi := &file_flow_flow_proto_msgTypes[30] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4316,7 +4404,7 @@ func (x *TimeNotification) ProtoReflect() protoreflect.Message { // Deprecated: Use TimeNotification.ProtoReflect.Descriptor instead. 
func (*TimeNotification) Descriptor() ([]byte, []int) { - return file_flow_flow_proto_rawDescGZIP(), []int{29} + return file_flow_flow_proto_rawDescGZIP(), []int{30} } func (x *TimeNotification) GetTime() *timestamppb.Timestamp { @@ -4337,7 +4425,7 @@ type PolicyUpdateNotification struct { func (x *PolicyUpdateNotification) Reset() { *x = PolicyUpdateNotification{} - mi := &file_flow_flow_proto_msgTypes[30] + mi := &file_flow_flow_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4349,7 +4437,7 @@ func (x *PolicyUpdateNotification) String() string { func (*PolicyUpdateNotification) ProtoMessage() {} func (x *PolicyUpdateNotification) ProtoReflect() protoreflect.Message { - mi := &file_flow_flow_proto_msgTypes[30] + mi := &file_flow_flow_proto_msgTypes[31] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4362,7 +4450,7 @@ func (x *PolicyUpdateNotification) ProtoReflect() protoreflect.Message { // Deprecated: Use PolicyUpdateNotification.ProtoReflect.Descriptor instead. func (*PolicyUpdateNotification) Descriptor() ([]byte, []int) { - return file_flow_flow_proto_rawDescGZIP(), []int{30} + return file_flow_flow_proto_rawDescGZIP(), []int{31} } func (x *PolicyUpdateNotification) GetLabels() []string { @@ -4397,7 +4485,7 @@ type EndpointRegenNotification struct { func (x *EndpointRegenNotification) Reset() { *x = EndpointRegenNotification{} - mi := &file_flow_flow_proto_msgTypes[31] + mi := &file_flow_flow_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4409,7 +4497,7 @@ func (x *EndpointRegenNotification) String() string { func (*EndpointRegenNotification) ProtoMessage() {} func (x *EndpointRegenNotification) ProtoReflect() protoreflect.Message { - mi := &file_flow_flow_proto_msgTypes[31] + mi := &file_flow_flow_proto_msgTypes[32] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4422,7 +4510,7 @@ func (x *EndpointRegenNotification) ProtoReflect() protoreflect.Message { // Deprecated: Use EndpointRegenNotification.ProtoReflect.Descriptor instead. func (*EndpointRegenNotification) Descriptor() ([]byte, []int) { - return file_flow_flow_proto_rawDescGZIP(), []int{31} + return file_flow_flow_proto_rawDescGZIP(), []int{32} } func (x *EndpointRegenNotification) GetId() uint64 { @@ -4459,7 +4547,7 @@ type EndpointUpdateNotification struct { func (x *EndpointUpdateNotification) Reset() { *x = EndpointUpdateNotification{} - mi := &file_flow_flow_proto_msgTypes[32] + mi := &file_flow_flow_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4471,7 +4559,7 @@ func (x *EndpointUpdateNotification) String() string { func (*EndpointUpdateNotification) ProtoMessage() {} func (x *EndpointUpdateNotification) ProtoReflect() protoreflect.Message { - mi := &file_flow_flow_proto_msgTypes[32] + mi := &file_flow_flow_proto_msgTypes[33] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4484,7 +4572,7 @@ func (x *EndpointUpdateNotification) ProtoReflect() protoreflect.Message { // Deprecated: Use EndpointUpdateNotification.ProtoReflect.Descriptor instead. 
func (*EndpointUpdateNotification) Descriptor() ([]byte, []int) { - return file_flow_flow_proto_rawDescGZIP(), []int{32} + return file_flow_flow_proto_rawDescGZIP(), []int{33} } func (x *EndpointUpdateNotification) GetId() uint64 { @@ -4538,7 +4626,7 @@ type IPCacheNotification struct { func (x *IPCacheNotification) Reset() { *x = IPCacheNotification{} - mi := &file_flow_flow_proto_msgTypes[33] + mi := &file_flow_flow_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4550,7 +4638,7 @@ func (x *IPCacheNotification) String() string { func (*IPCacheNotification) ProtoMessage() {} func (x *IPCacheNotification) ProtoReflect() protoreflect.Message { - mi := &file_flow_flow_proto_msgTypes[33] + mi := &file_flow_flow_proto_msgTypes[34] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4563,7 +4651,7 @@ func (x *IPCacheNotification) ProtoReflect() protoreflect.Message { // Deprecated: Use IPCacheNotification.ProtoReflect.Descriptor instead. func (*IPCacheNotification) Descriptor() ([]byte, []int) { - return file_flow_flow_proto_rawDescGZIP(), []int{33} + return file_flow_flow_proto_rawDescGZIP(), []int{34} } func (x *IPCacheNotification) GetCidr() string { @@ -4633,7 +4721,7 @@ type ServiceUpsertNotificationAddr struct { func (x *ServiceUpsertNotificationAddr) Reset() { *x = ServiceUpsertNotificationAddr{} - mi := &file_flow_flow_proto_msgTypes[34] + mi := &file_flow_flow_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4645,7 +4733,7 @@ func (x *ServiceUpsertNotificationAddr) String() string { func (*ServiceUpsertNotificationAddr) ProtoMessage() {} func (x *ServiceUpsertNotificationAddr) ProtoReflect() protoreflect.Message { - mi := &file_flow_flow_proto_msgTypes[34] + mi := &file_flow_flow_proto_msgTypes[35] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4658,7 +4746,7 @@ func (x *ServiceUpsertNotificationAddr) ProtoReflect() protoreflect.Message { // Deprecated: Use ServiceUpsertNotificationAddr.ProtoReflect.Descriptor instead. func (*ServiceUpsertNotificationAddr) Descriptor() ([]byte, []int) { - return file_flow_flow_proto_rawDescGZIP(), []int{34} + return file_flow_flow_proto_rawDescGZIP(), []int{35} } func (x *ServiceUpsertNotificationAddr) GetIp() string { @@ -4694,7 +4782,7 @@ type ServiceUpsertNotification struct { func (x *ServiceUpsertNotification) Reset() { *x = ServiceUpsertNotification{} - mi := &file_flow_flow_proto_msgTypes[35] + mi := &file_flow_flow_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4706,7 +4794,7 @@ func (x *ServiceUpsertNotification) String() string { func (*ServiceUpsertNotification) ProtoMessage() {} func (x *ServiceUpsertNotification) ProtoReflect() protoreflect.Message { - mi := &file_flow_flow_proto_msgTypes[35] + mi := &file_flow_flow_proto_msgTypes[36] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4719,7 +4807,7 @@ func (x *ServiceUpsertNotification) ProtoReflect() protoreflect.Message { // Deprecated: Use ServiceUpsertNotification.ProtoReflect.Descriptor instead. 
func (*ServiceUpsertNotification) Descriptor() ([]byte, []int) { - return file_flow_flow_proto_rawDescGZIP(), []int{35} + return file_flow_flow_proto_rawDescGZIP(), []int{36} } func (x *ServiceUpsertNotification) GetId() uint32 { @@ -4796,7 +4884,7 @@ type ServiceDeleteNotification struct { func (x *ServiceDeleteNotification) Reset() { *x = ServiceDeleteNotification{} - mi := &file_flow_flow_proto_msgTypes[36] + mi := &file_flow_flow_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4808,7 +4896,7 @@ func (x *ServiceDeleteNotification) String() string { func (*ServiceDeleteNotification) ProtoMessage() {} func (x *ServiceDeleteNotification) ProtoReflect() protoreflect.Message { - mi := &file_flow_flow_proto_msgTypes[36] + mi := &file_flow_flow_proto_msgTypes[37] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4821,7 +4909,7 @@ func (x *ServiceDeleteNotification) ProtoReflect() protoreflect.Message { // Deprecated: Use ServiceDeleteNotification.ProtoReflect.Descriptor instead. func (*ServiceDeleteNotification) Descriptor() ([]byte, []int) { - return file_flow_flow_proto_rawDescGZIP(), []int{36} + return file_flow_flow_proto_rawDescGZIP(), []int{37} } func (x *ServiceDeleteNotification) GetId() uint32 { @@ -4841,7 +4929,7 @@ type NetworkInterface struct { func (x *NetworkInterface) Reset() { *x = NetworkInterface{} - mi := &file_flow_flow_proto_msgTypes[37] + mi := &file_flow_flow_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4853,7 +4941,7 @@ func (x *NetworkInterface) String() string { func (*NetworkInterface) ProtoMessage() {} func (x *NetworkInterface) ProtoReflect() protoreflect.Message { - mi := &file_flow_flow_proto_msgTypes[37] + mi := &file_flow_flow_proto_msgTypes[38] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4866,7 +4954,7 @@ func (x *NetworkInterface) ProtoReflect() protoreflect.Message { // Deprecated: Use NetworkInterface.ProtoReflect.Descriptor instead. func (*NetworkInterface) Descriptor() ([]byte, []int) { - return file_flow_flow_proto_rawDescGZIP(), []int{37} + return file_flow_flow_proto_rawDescGZIP(), []int{38} } func (x *NetworkInterface) GetIndex() uint32 { @@ -4899,7 +4987,7 @@ type DebugEvent struct { func (x *DebugEvent) Reset() { *x = DebugEvent{} - mi := &file_flow_flow_proto_msgTypes[38] + mi := &file_flow_flow_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4911,7 +4999,7 @@ func (x *DebugEvent) String() string { func (*DebugEvent) ProtoMessage() {} func (x *DebugEvent) ProtoReflect() protoreflect.Message { - mi := &file_flow_flow_proto_msgTypes[38] + mi := &file_flow_flow_proto_msgTypes[39] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4924,7 +5012,7 @@ func (x *DebugEvent) ProtoReflect() protoreflect.Message { // Deprecated: Use DebugEvent.ProtoReflect.Descriptor instead. 
func (*DebugEvent) Descriptor() ([]byte, []int) { - return file_flow_flow_proto_rawDescGZIP(), []int{38} + return file_flow_flow_proto_rawDescGZIP(), []int{39} } func (x *DebugEvent) GetType() DebugEventType { @@ -5003,7 +5091,7 @@ type FlowFilter_Experimental struct { func (x *FlowFilter_Experimental) Reset() { *x = FlowFilter_Experimental{} - mi := &file_flow_flow_proto_msgTypes[39] + mi := &file_flow_flow_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5015,7 +5103,7 @@ func (x *FlowFilter_Experimental) String() string { func (*FlowFilter_Experimental) ProtoMessage() {} func (x *FlowFilter_Experimental) ProtoReflect() protoreflect.Message { - mi := &file_flow_flow_proto_msgTypes[39] + mi := &file_flow_flow_proto_msgTypes[40] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5042,7 +5130,7 @@ var File_flow_flow_proto protoreflect.FileDescriptor const file_flow_flow_proto_rawDesc = "" + "\n" + - "\x0fflow/flow.proto\x12\x04flow\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xb4\x0f\n" + + "\x0fflow/flow.proto\x12\x04flow\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xe5\x0f\n" + "\x04Flow\x12.\n" + "\x04time\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\x04time\x12\x12\n" + "\x04uuid\x18\" \x01(\tR\x04uuid\x12'\n" + @@ -5073,7 +5161,8 @@ const file_flow_flow_proto_rawDesc = "" + "\x11policy_match_type\x18\x17 \x01(\rR\x0fpolicyMatchType\x12S\n" + "\x17trace_observation_point\x18\x18 \x01(\x0e2\x1b.flow.TraceObservationPointR\x15traceObservationPoint\x124\n" + "\ftrace_reason\x18$ \x01(\x0e2\x11.flow.TraceReasonR\vtraceReason\x12\"\n" + - "\x04file\x18& \x01(\v2\x0e.flow.FileInfoR\x04file\x12:\n" + + "\x04file\x18& \x01(\v2\x0e.flow.FileInfoR\x04file\x12/\n" + + "\vip_trace_id\x18( \x01(\v2\x0f.flow.IPTraceIDR\tipTraceId\x12:\n" + "\x10drop_reason_desc\x18\x19 \x01(\x0e2\x10.flow.DropReasonR\x0edropReasonDesc\x125\n" + "\bis_reply\x18\x1a \x01(\v2\x1a.google.protobuf.BoolValueR\aisReply\x12G\n" + "\x13debug_capture_point\x18\x1b \x01(\x0e2\x17.flow.DebugCapturePointR\x11debugCapturePoint\x124\n" + @@ -5189,7 +5278,7 @@ const file_flow_flow_proto_rawDesc = "" + "\bsub_type\x18\x03 \x01(\x05R\asubType\"@\n" + "\x0fCiliumEventType\x12\x12\n" + "\x04type\x18\x01 \x01(\x05R\x04type\x12\x19\n" + - "\bsub_type\x18\x02 \x01(\x05R\asubType\"\xa4\r\n" + + "\bsub_type\x18\x02 \x01(\x05R\asubType\"\xc4\r\n" + "\n" + "FlowFilter\x12\x12\n" + "\x04uuid\x18\x1d \x03(\tR\x04uuid\x12\x1b\n" + @@ -5238,7 +5327,8 @@ const file_flow_flow_proto_rawDesc = "" + "nodeLabels\x12.\n" + "\n" + "ip_version\x18\x19 \x03(\x0e2\x0f.flow.IPVersionR\tipVersion\x12\x19\n" + - "\btrace_id\x18\x1c \x03(\tR\atraceId\x12B\n" + + "\btrace_id\x18\x1c \x03(\tR\atraceId\x12\x1e\n" + + "\vip_trace_id\x18' \x03(\x04R\tipTraceId\x12B\n" + "\fexperimental\x18\xe7\a \x01(\v2\x1d.flow.FlowFilter.ExperimentalR\fexperimental\x1a5\n" + "\fExperimental\x12%\n" + "\x0ecel_expression\x18\x01 \x03(\tR\rcelExpression\"\xce\x01\n" + @@ -5271,11 +5361,16 @@ const file_flow_flow_proto_rawDesc = "" + "\x05topic\x18\x05 \x01(\tR\x05topic\";\n" + "\aService\x12\x12\n" + "\x04name\x18\x01 \x01(\tR\x04name\x12\x1c\n" + - "\tnamespace\x18\x02 \x01(\tR\tnamespace\"\x91\x01\n" + + "\tnamespace\x18\x02 \x01(\tR\tnamespace\"L\n" + + "\tIPTraceID\x12\x19\n" + + "\btrace_id\x18\x01 \x01(\x04R\atraceId\x12$\n" + + 
"\x0eip_option_type\x18\x02 \x01(\rR\fipOptionType\"\xf3\x01\n" + "\tLostEvent\x12-\n" + "\x06source\x18\x01 \x01(\x0e2\x15.flow.LostEventSourceR\x06source\x12&\n" + "\x0fnum_events_lost\x18\x02 \x01(\x04R\rnumEventsLost\x12-\n" + - "\x03cpu\x18\x03 \x01(\v2\x1b.google.protobuf.Int32ValueR\x03cpu\"\xfe\x04\n" + + "\x03cpu\x18\x03 \x01(\v2\x1b.google.protobuf.Int32ValueR\x03cpu\x120\n" + + "\x05first\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\x05first\x12.\n" + + "\x04last\x18\x05 \x01(\v2\x1a.google.protobuf.TimestampR\x04last\"\xfe\x04\n" + "\n" + "AgentEvent\x12(\n" + "\x04type\x18\x01 \x01(\x0e2\x14.flow.AgentEventTypeR\x04type\x123\n" + @@ -5624,7 +5719,7 @@ func file_flow_flow_proto_rawDescGZIP() []byte { } var file_flow_flow_proto_enumTypes = make([]protoimpl.EnumInfo, 16) -var file_flow_flow_proto_msgTypes = make([]protoimpl.MessageInfo, 40) +var file_flow_flow_proto_msgTypes = make([]protoimpl.MessageInfo, 41) var file_flow_flow_proto_goTypes = []any{ (FlowType)(0), // 0: flow.FlowType (AuthType)(0), // 1: flow.AuthType @@ -5668,28 +5763,29 @@ var file_flow_flow_proto_goTypes = []any{ (*HTTP)(nil), // 39: flow.HTTP (*Kafka)(nil), // 40: flow.Kafka (*Service)(nil), // 41: flow.Service - (*LostEvent)(nil), // 42: flow.LostEvent - (*AgentEvent)(nil), // 43: flow.AgentEvent - (*AgentEventUnknown)(nil), // 44: flow.AgentEventUnknown - (*TimeNotification)(nil), // 45: flow.TimeNotification - (*PolicyUpdateNotification)(nil), // 46: flow.PolicyUpdateNotification - (*EndpointRegenNotification)(nil), // 47: flow.EndpointRegenNotification - (*EndpointUpdateNotification)(nil), // 48: flow.EndpointUpdateNotification - (*IPCacheNotification)(nil), // 49: flow.IPCacheNotification - (*ServiceUpsertNotificationAddr)(nil), // 50: flow.ServiceUpsertNotificationAddr - (*ServiceUpsertNotification)(nil), // 51: flow.ServiceUpsertNotification - (*ServiceDeleteNotification)(nil), // 52: flow.ServiceDeleteNotification - (*NetworkInterface)(nil), // 53: flow.NetworkInterface - (*DebugEvent)(nil), // 54: flow.DebugEvent - (*FlowFilter_Experimental)(nil), // 55: flow.FlowFilter.Experimental - (*timestamppb.Timestamp)(nil), // 56: google.protobuf.Timestamp - (*wrapperspb.BoolValue)(nil), // 57: google.protobuf.BoolValue - (*anypb.Any)(nil), // 58: google.protobuf.Any - (*wrapperspb.Int32Value)(nil), // 59: google.protobuf.Int32Value - (*wrapperspb.UInt32Value)(nil), // 60: google.protobuf.UInt32Value + (*IPTraceID)(nil), // 42: flow.IPTraceID + (*LostEvent)(nil), // 43: flow.LostEvent + (*AgentEvent)(nil), // 44: flow.AgentEvent + (*AgentEventUnknown)(nil), // 45: flow.AgentEventUnknown + (*TimeNotification)(nil), // 46: flow.TimeNotification + (*PolicyUpdateNotification)(nil), // 47: flow.PolicyUpdateNotification + (*EndpointRegenNotification)(nil), // 48: flow.EndpointRegenNotification + (*EndpointUpdateNotification)(nil), // 49: flow.EndpointUpdateNotification + (*IPCacheNotification)(nil), // 50: flow.IPCacheNotification + (*ServiceUpsertNotificationAddr)(nil), // 51: flow.ServiceUpsertNotificationAddr + (*ServiceUpsertNotification)(nil), // 52: flow.ServiceUpsertNotification + (*ServiceDeleteNotification)(nil), // 53: flow.ServiceDeleteNotification + (*NetworkInterface)(nil), // 54: flow.NetworkInterface + (*DebugEvent)(nil), // 55: flow.DebugEvent + (*FlowFilter_Experimental)(nil), // 56: flow.FlowFilter.Experimental + (*timestamppb.Timestamp)(nil), // 57: google.protobuf.Timestamp + (*wrapperspb.BoolValue)(nil), // 58: google.protobuf.BoolValue + (*anypb.Any)(nil), // 59: google.protobuf.Any + 
(*wrapperspb.Int32Value)(nil), // 60: google.protobuf.Int32Value + (*wrapperspb.UInt32Value)(nil), // 61: google.protobuf.UInt32Value } var file_flow_flow_proto_depIdxs = []int32{ - 56, // 0: flow.Flow.time:type_name -> google.protobuf.Timestamp + 57, // 0: flow.Flow.time:type_name -> google.protobuf.Timestamp 6, // 1: flow.Flow.verdict:type_name -> flow.Verdict 1, // 2: flow.Flow.auth_type:type_name -> flow.AuthType 26, // 3: flow.Flow.ethernet:type_name -> flow.Ethernet @@ -5707,72 +5803,75 @@ var file_flow_flow_proto_depIdxs = []int32{ 2, // 15: flow.Flow.trace_observation_point:type_name -> flow.TraceObservationPoint 3, // 16: flow.Flow.trace_reason:type_name -> flow.TraceReason 17, // 17: flow.Flow.file:type_name -> flow.FileInfo - 7, // 18: flow.Flow.drop_reason_desc:type_name -> flow.DropReason - 57, // 19: flow.Flow.is_reply:type_name -> google.protobuf.BoolValue - 9, // 20: flow.Flow.debug_capture_point:type_name -> flow.DebugCapturePoint - 53, // 21: flow.Flow.interface:type_name -> flow.NetworkInterface - 20, // 22: flow.Flow.trace_context:type_name -> flow.TraceContext - 13, // 23: flow.Flow.sock_xlate_point:type_name -> flow.SocketTranslationPoint - 58, // 24: flow.Flow.extensions:type_name -> google.protobuf.Any - 33, // 25: flow.Flow.egress_allowed_by:type_name -> flow.Policy - 33, // 26: flow.Flow.ingress_allowed_by:type_name -> flow.Policy - 33, // 27: flow.Flow.egress_denied_by:type_name -> flow.Policy - 33, // 28: flow.Flow.ingress_denied_by:type_name -> flow.Policy - 24, // 29: flow.Layer4.TCP:type_name -> flow.TCP - 28, // 30: flow.Layer4.UDP:type_name -> flow.UDP - 30, // 31: flow.Layer4.ICMPv4:type_name -> flow.ICMPv4 - 31, // 32: flow.Layer4.ICMPv6:type_name -> flow.ICMPv6 - 29, // 33: flow.Layer4.SCTP:type_name -> flow.SCTP - 4, // 34: flow.Layer7.type:type_name -> flow.L7FlowType - 37, // 35: flow.Layer7.dns:type_name -> flow.DNS - 39, // 36: flow.Layer7.http:type_name -> flow.HTTP - 40, // 37: flow.Layer7.kafka:type_name -> flow.Kafka - 21, // 38: flow.TraceContext.parent:type_name -> flow.TraceParent - 23, // 39: flow.Endpoint.workloads:type_name -> flow.Workload - 27, // 40: flow.TCP.flags:type_name -> flow.TCPFlags - 5, // 41: flow.IP.ipVersion:type_name -> flow.IPVersion - 15, // 42: flow.Tunnel.protocol:type_name -> flow.Tunnel.Protocol - 25, // 43: flow.Tunnel.IP:type_name -> flow.IP - 18, // 44: flow.Tunnel.l4:type_name -> flow.Layer4 - 23, // 45: flow.FlowFilter.source_workload:type_name -> flow.Workload - 23, // 46: flow.FlowFilter.destination_workload:type_name -> flow.Workload - 8, // 47: flow.FlowFilter.traffic_direction:type_name -> flow.TrafficDirection - 6, // 48: flow.FlowFilter.verdict:type_name -> flow.Verdict - 7, // 49: flow.FlowFilter.drop_reason_desc:type_name -> flow.DropReason - 53, // 50: flow.FlowFilter.interface:type_name -> flow.NetworkInterface - 34, // 51: flow.FlowFilter.event_type:type_name -> flow.EventTypeFilter - 38, // 52: flow.FlowFilter.http_header:type_name -> flow.HTTPHeader - 27, // 53: flow.FlowFilter.tcp_flags:type_name -> flow.TCPFlags - 5, // 54: flow.FlowFilter.ip_version:type_name -> flow.IPVersion - 55, // 55: flow.FlowFilter.experimental:type_name -> flow.FlowFilter.Experimental - 38, // 56: flow.HTTP.headers:type_name -> flow.HTTPHeader - 11, // 57: flow.LostEvent.source:type_name -> flow.LostEventSource - 59, // 58: flow.LostEvent.cpu:type_name -> google.protobuf.Int32Value - 12, // 59: flow.AgentEvent.type:type_name -> flow.AgentEventType - 44, // 60: flow.AgentEvent.unknown:type_name -> flow.AgentEventUnknown - 
45, // 61: flow.AgentEvent.agent_start:type_name -> flow.TimeNotification - 46, // 62: flow.AgentEvent.policy_update:type_name -> flow.PolicyUpdateNotification - 47, // 63: flow.AgentEvent.endpoint_regenerate:type_name -> flow.EndpointRegenNotification - 48, // 64: flow.AgentEvent.endpoint_update:type_name -> flow.EndpointUpdateNotification - 49, // 65: flow.AgentEvent.ipcache_update:type_name -> flow.IPCacheNotification - 51, // 66: flow.AgentEvent.service_upsert:type_name -> flow.ServiceUpsertNotification - 52, // 67: flow.AgentEvent.service_delete:type_name -> flow.ServiceDeleteNotification - 56, // 68: flow.TimeNotification.time:type_name -> google.protobuf.Timestamp - 60, // 69: flow.IPCacheNotification.old_identity:type_name -> google.protobuf.UInt32Value - 50, // 70: flow.ServiceUpsertNotification.frontend_address:type_name -> flow.ServiceUpsertNotificationAddr - 50, // 71: flow.ServiceUpsertNotification.backend_addresses:type_name -> flow.ServiceUpsertNotificationAddr - 14, // 72: flow.DebugEvent.type:type_name -> flow.DebugEventType - 22, // 73: flow.DebugEvent.source:type_name -> flow.Endpoint - 60, // 74: flow.DebugEvent.hash:type_name -> google.protobuf.UInt32Value - 60, // 75: flow.DebugEvent.arg1:type_name -> google.protobuf.UInt32Value - 60, // 76: flow.DebugEvent.arg2:type_name -> google.protobuf.UInt32Value - 60, // 77: flow.DebugEvent.arg3:type_name -> google.protobuf.UInt32Value - 59, // 78: flow.DebugEvent.cpu:type_name -> google.protobuf.Int32Value - 79, // [79:79] is the sub-list for method output_type - 79, // [79:79] is the sub-list for method input_type - 79, // [79:79] is the sub-list for extension type_name - 79, // [79:79] is the sub-list for extension extendee - 0, // [0:79] is the sub-list for field type_name + 42, // 18: flow.Flow.ip_trace_id:type_name -> flow.IPTraceID + 7, // 19: flow.Flow.drop_reason_desc:type_name -> flow.DropReason + 58, // 20: flow.Flow.is_reply:type_name -> google.protobuf.BoolValue + 9, // 21: flow.Flow.debug_capture_point:type_name -> flow.DebugCapturePoint + 54, // 22: flow.Flow.interface:type_name -> flow.NetworkInterface + 20, // 23: flow.Flow.trace_context:type_name -> flow.TraceContext + 13, // 24: flow.Flow.sock_xlate_point:type_name -> flow.SocketTranslationPoint + 59, // 25: flow.Flow.extensions:type_name -> google.protobuf.Any + 33, // 26: flow.Flow.egress_allowed_by:type_name -> flow.Policy + 33, // 27: flow.Flow.ingress_allowed_by:type_name -> flow.Policy + 33, // 28: flow.Flow.egress_denied_by:type_name -> flow.Policy + 33, // 29: flow.Flow.ingress_denied_by:type_name -> flow.Policy + 24, // 30: flow.Layer4.TCP:type_name -> flow.TCP + 28, // 31: flow.Layer4.UDP:type_name -> flow.UDP + 30, // 32: flow.Layer4.ICMPv4:type_name -> flow.ICMPv4 + 31, // 33: flow.Layer4.ICMPv6:type_name -> flow.ICMPv6 + 29, // 34: flow.Layer4.SCTP:type_name -> flow.SCTP + 4, // 35: flow.Layer7.type:type_name -> flow.L7FlowType + 37, // 36: flow.Layer7.dns:type_name -> flow.DNS + 39, // 37: flow.Layer7.http:type_name -> flow.HTTP + 40, // 38: flow.Layer7.kafka:type_name -> flow.Kafka + 21, // 39: flow.TraceContext.parent:type_name -> flow.TraceParent + 23, // 40: flow.Endpoint.workloads:type_name -> flow.Workload + 27, // 41: flow.TCP.flags:type_name -> flow.TCPFlags + 5, // 42: flow.IP.ipVersion:type_name -> flow.IPVersion + 15, // 43: flow.Tunnel.protocol:type_name -> flow.Tunnel.Protocol + 25, // 44: flow.Tunnel.IP:type_name -> flow.IP + 18, // 45: flow.Tunnel.l4:type_name -> flow.Layer4 + 23, // 46: flow.FlowFilter.source_workload:type_name 
-> flow.Workload + 23, // 47: flow.FlowFilter.destination_workload:type_name -> flow.Workload + 8, // 48: flow.FlowFilter.traffic_direction:type_name -> flow.TrafficDirection + 6, // 49: flow.FlowFilter.verdict:type_name -> flow.Verdict + 7, // 50: flow.FlowFilter.drop_reason_desc:type_name -> flow.DropReason + 54, // 51: flow.FlowFilter.interface:type_name -> flow.NetworkInterface + 34, // 52: flow.FlowFilter.event_type:type_name -> flow.EventTypeFilter + 38, // 53: flow.FlowFilter.http_header:type_name -> flow.HTTPHeader + 27, // 54: flow.FlowFilter.tcp_flags:type_name -> flow.TCPFlags + 5, // 55: flow.FlowFilter.ip_version:type_name -> flow.IPVersion + 56, // 56: flow.FlowFilter.experimental:type_name -> flow.FlowFilter.Experimental + 38, // 57: flow.HTTP.headers:type_name -> flow.HTTPHeader + 11, // 58: flow.LostEvent.source:type_name -> flow.LostEventSource + 60, // 59: flow.LostEvent.cpu:type_name -> google.protobuf.Int32Value + 57, // 60: flow.LostEvent.first:type_name -> google.protobuf.Timestamp + 57, // 61: flow.LostEvent.last:type_name -> google.protobuf.Timestamp + 12, // 62: flow.AgentEvent.type:type_name -> flow.AgentEventType + 45, // 63: flow.AgentEvent.unknown:type_name -> flow.AgentEventUnknown + 46, // 64: flow.AgentEvent.agent_start:type_name -> flow.TimeNotification + 47, // 65: flow.AgentEvent.policy_update:type_name -> flow.PolicyUpdateNotification + 48, // 66: flow.AgentEvent.endpoint_regenerate:type_name -> flow.EndpointRegenNotification + 49, // 67: flow.AgentEvent.endpoint_update:type_name -> flow.EndpointUpdateNotification + 50, // 68: flow.AgentEvent.ipcache_update:type_name -> flow.IPCacheNotification + 52, // 69: flow.AgentEvent.service_upsert:type_name -> flow.ServiceUpsertNotification + 53, // 70: flow.AgentEvent.service_delete:type_name -> flow.ServiceDeleteNotification + 57, // 71: flow.TimeNotification.time:type_name -> google.protobuf.Timestamp + 61, // 72: flow.IPCacheNotification.old_identity:type_name -> google.protobuf.UInt32Value + 51, // 73: flow.ServiceUpsertNotification.frontend_address:type_name -> flow.ServiceUpsertNotificationAddr + 51, // 74: flow.ServiceUpsertNotification.backend_addresses:type_name -> flow.ServiceUpsertNotificationAddr + 14, // 75: flow.DebugEvent.type:type_name -> flow.DebugEventType + 22, // 76: flow.DebugEvent.source:type_name -> flow.Endpoint + 61, // 77: flow.DebugEvent.hash:type_name -> google.protobuf.UInt32Value + 61, // 78: flow.DebugEvent.arg1:type_name -> google.protobuf.UInt32Value + 61, // 79: flow.DebugEvent.arg2:type_name -> google.protobuf.UInt32Value + 61, // 80: flow.DebugEvent.arg3:type_name -> google.protobuf.UInt32Value + 60, // 81: flow.DebugEvent.cpu:type_name -> google.protobuf.Int32Value + 82, // [82:82] is the sub-list for method output_type + 82, // [82:82] is the sub-list for method input_type + 82, // [82:82] is the sub-list for extension type_name + 82, // [82:82] is the sub-list for extension extendee + 0, // [0:82] is the sub-list for field type_name } func init() { file_flow_flow_proto_init() } @@ -5792,7 +5891,7 @@ func file_flow_flow_proto_init() { (*Layer7_Http)(nil), (*Layer7_Kafka)(nil), } - file_flow_flow_proto_msgTypes[27].OneofWrappers = []any{ + file_flow_flow_proto_msgTypes[28].OneofWrappers = []any{ (*AgentEvent_Unknown)(nil), (*AgentEvent_AgentStart)(nil), (*AgentEvent_PolicyUpdate)(nil), @@ -5808,7 +5907,7 @@ func file_flow_flow_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_flow_flow_proto_rawDesc), 
len(file_flow_flow_proto_rawDesc)), NumEnums: 16, - NumMessages: 40, + NumMessages: 41, NumExtensions: 0, NumServices: 0, }, diff --git a/api/v1/flow/flow.pb.json.go b/api/v1/flow/flow.pb.json.go index 0876b597cb162..1bc77a60c0130 100644 --- a/api/v1/flow/flow.pb.json.go +++ b/api/v1/flow/flow.pb.json.go @@ -334,6 +334,18 @@ func (msg *Service) UnmarshalJSON(b []byte) error { return protojson.UnmarshalOptions{}.Unmarshal(b, msg) } +// MarshalJSON implements json.Marshaler +func (msg *IPTraceID) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *IPTraceID) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{}.Unmarshal(b, msg) +} + // MarshalJSON implements json.Marshaler func (msg *LostEvent) MarshalJSON() ([]byte, error) { return protojson.MarshalOptions{ diff --git a/api/v1/flow/flow.proto b/api/v1/flow/flow.proto index 1b45dd2917af7..63821c8a88d8f 100644 --- a/api/v1/flow/flow.proto +++ b/api/v1/flow/flow.proto @@ -87,6 +87,9 @@ message Flow { // Verdict = DROPPED. FileInfo file = 38; + // IPTraceID relates to the trace ID in the IP options of a packet. + IPTraceID ip_trace_id = 40; + // only applicable to Verdict = DROPPED. DropReason drop_reason_desc = 25; @@ -207,7 +210,7 @@ enum TraceObservationPoint { // TO_CRYPTO indicates network packets are transmitted towards the crypto // process for encryption. TO_CRYPTO = 13; - + } enum TraceReason { @@ -640,6 +643,8 @@ message FlowFilter { // trace_id filters flows by trace ID repeated string trace_id = 28; + // ip_trace_id filters flows by IPTraceID + repeated uint64 ip_trace_id = 39; // Experimental contains filters that are not stable yet. Support for // experimental features is always optional and subject to change. @@ -721,6 +726,11 @@ message Service { string namespace = 2; } +message IPTraceID { + uint64 trace_id = 1; + uint32 ip_option_type = 2; +} + enum LostEventSource { UNKNOWN_LOST_EVENT_SOURCE = 0; // PERF_EVENT_RING_BUFFER indicates that events were dropped in the BPF @@ -747,6 +757,10 @@ message LostEvent { // cpu on which the event was lost if the source of lost events is // PERF_EVENT_RING_BUFFER. google.protobuf.Int32Value cpu = 3; + // first is the timestamp of the first event that was lost. + google.protobuf.Timestamp first = 4; + // last is the timestamp of the last event that was lost. + google.protobuf.Timestamp last = 5; } // AgentEventType is the type of agent event. These values are shared with type diff --git a/api/v1/models/cluster_mesh_status.go b/api/v1/models/cluster_mesh_status.go index 3835bbbf56689..f1669f69b82ac 100644 --- a/api/v1/models/cluster_mesh_status.go +++ b/api/v1/models/cluster_mesh_status.go @@ -26,9 +26,6 @@ type ClusterMeshStatus struct { // List of remote clusters Clusters []*RemoteCluster `json:"clusters"` - - // Number of global services - NumGlobalServices int64 `json:"num-global-services,omitempty"` } // Validate validates this cluster mesh status diff --git a/api/v1/observer/observer.pb.go b/api/v1/observer/observer.pb.go index 4c36303c847c4..689cc5c5afa38 100644 --- a/api/v1/observer/observer.pb.go +++ b/api/v1/observer/observer.pb.go @@ -4,7 +4,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
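Editor's note (not part of the patch): the flow.proto hunk above is the authoritative definition of the new ip_trace_id filter, the IPTraceID message, and the LostEvent first/last timestamps. A short illustrative sketch of how the repeated FlowFilter.ip_trace_id field and the IPTraceID JSON marshaler generated in flow.pb.json.go might be exercised; the flowpb alias is an assumption, and passing the filter in an observer GetFlowsRequest is implied but not shown.

package main

import (
	"encoding/json"
	"fmt"

	flowpb "github.com/cilium/cilium/api/v1/flow"
)

func main() {
	// Only match flows whose IP-option trace ID equals one of these values.
	filter := &flowpb.FlowFilter{
		IpTraceId: []uint64{1234, 5678}, // illustrative trace IDs
	}
	fmt.Println(filter.GetIpTraceId())

	// json.Marshal dispatches to the MarshalJSON generated above, which uses
	// proto field names, so the output carries "trace_id" and "ip_option_type".
	out, err := json.Marshal(&flowpb.IPTraceID{TraceId: 1234, IpOptionType: 0x9a})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}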
// versions: // protoc-gen-go v1.36.8 -// protoc v6.32.0 +// protoc v6.32.1 // source: observer/observer.proto package observer @@ -392,6 +392,7 @@ type HTTPHeader = flow.HTTPHeader type HTTP = flow.HTTP type Kafka = flow.Kafka type Service = flow.Service +type IPTraceID = flow.IPTraceID type LostEvent = flow.LostEvent type AgentEvent = flow.AgentEvent type AgentEvent_Unknown = flow.AgentEvent_Unknown diff --git a/api/v1/observer/observer_grpc.pb.go b/api/v1/observer/observer_grpc.pb.go index 8ad4b2e199a5e..890029b844ad0 100644 --- a/api/v1/observer/observer_grpc.pb.go +++ b/api/v1/observer/observer_grpc.pb.go @@ -4,7 +4,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.5.1 -// - protoc v6.32.0 +// - protoc v6.32.1 // source: observer/observer.proto package observer diff --git a/api/v1/openapi.yaml b/api/v1/openapi.yaml index 08cea6f14ad9e..1a1a6e980f84a 100644 --- a/api/v1/openapi.yaml +++ b/api/v1/openapi.yaml @@ -2339,9 +2339,6 @@ definitions: type: array items: "$ref": "#/definitions/RemoteCluster" - num-global-services: - description: Number of global services - type: integer RemoteCluster: description: |- Status of remote cluster diff --git a/api/v1/peer/peer.pb.go b/api/v1/peer/peer.pb.go index 94db6ba3a6487..bfa6f610e539a 100644 --- a/api/v1/peer/peer.pb.go +++ b/api/v1/peer/peer.pb.go @@ -4,7 +4,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.36.8 -// protoc v6.32.0 +// protoc v6.32.1 // source: peer/peer.proto package peer diff --git a/api/v1/peer/peer_grpc.pb.go b/api/v1/peer/peer_grpc.pb.go index 3f64d837e617f..f68ab95280e43 100644 --- a/api/v1/peer/peer_grpc.pb.go +++ b/api/v1/peer/peer_grpc.pb.go @@ -4,7 +4,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.5.1 -// - protoc v6.32.0 +// - protoc v6.32.1 // source: peer/peer.proto package peer diff --git a/api/v1/relay/relay.pb.go b/api/v1/relay/relay.pb.go index 6182650c44890..0ddec414ecd78 100644 --- a/api/v1/relay/relay.pb.go +++ b/api/v1/relay/relay.pb.go @@ -4,7 +4,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.36.8 -// protoc v6.32.0 +// protoc v6.32.1 // source: relay/relay.proto package relay diff --git a/api/v1/server/embedded_spec.go b/api/v1/server/embedded_spec.go index 057c7e7522010..00645d253fe88 100644 --- a/api/v1/server/embedded_spec.go +++ b/api/v1/server/embedded_spec.go @@ -2200,10 +2200,6 @@ func init() { "items": { "$ref": "#/definitions/RemoteCluster" } - }, - "num-global-services": { - "description": "Number of global services", - "type": "integer" } } }, @@ -7569,10 +7565,6 @@ func init() { "items": { "$ref": "#/definitions/RemoteCluster" } - }, - "num-global-services": { - "description": "Number of global services", - "type": "integer" } } }, diff --git a/api/v1/standalone-dns-proxy/standalone-dns-proxy.pb.go b/api/v1/standalone-dns-proxy/standalone-dns-proxy.pb.go index 14a841425a605..f2a634bb31d81 100644 --- a/api/v1/standalone-dns-proxy/standalone-dns-proxy.pb.go +++ b/api/v1/standalone-dns-proxy/standalone-dns-proxy.pb.go @@ -4,7 +4,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.36.8 -// protoc v6.32.0 +// protoc v6.32.1 // source: standalone-dns-proxy/standalone-dns-proxy.proto package standalonednsproxy diff --git a/api/v1/standalone-dns-proxy/standalone-dns-proxy_grpc.pb.go b/api/v1/standalone-dns-proxy/standalone-dns-proxy_grpc.pb.go index 232f07cbf8f50..3d03bd97d8eba 100644 --- a/api/v1/standalone-dns-proxy/standalone-dns-proxy_grpc.pb.go +++ b/api/v1/standalone-dns-proxy/standalone-dns-proxy_grpc.pb.go @@ -4,7 +4,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.5.1 -// - protoc v6.32.0 +// - protoc v6.32.1 // source: standalone-dns-proxy/standalone-dns-proxy.proto package standalonednsproxy diff --git a/bpf/Makefile b/bpf/Makefile index 2719c99439fd5..a1af5d24f5877 100644 --- a/bpf/Makefile +++ b/bpf/Makefile @@ -78,12 +78,12 @@ HOST_OPTIONS = $(LXC_OPTIONS) \ -DENABLE_IPV4:-DENABLE_IPV6:-DENCAP_IFINDEX:-DTUNNEL_MODE:-DPOLICY_VERDICT_NOTIFY:-DENABLE_NODEPORT:-DENABLE_NODEPORT_ACCELERATION:-DENABLE_DSR:-DENABLE_DSR_HYBRID:-DENABLE_PREFILTER: \ -DENABLE_IPV4:-DENABLE_IPV6:-DENCAP_IFINDEX:-DTUNNEL_MODE:-DPOLICY_VERDICT_NOTIFY:-DENABLE_NODEPORT:-DENABLE_NODEPORT_ACCELERATION:-DENABLE_DSR:-DENABLE_DSR_HYBRID:-DENABLE_PREFILTER:-DENABLE_HOST_FIREWALL: \ -DENABLE_IPV4:-DENABLE_IPV6:-DENCAP_IFINDEX:-DTUNNEL_MODE:-DPOLICY_VERDICT_NOTIFY:-DENABLE_NODEPORT:-DENABLE_NODEPORT_ACCELERATION:-DENABLE_DSR:-DENABLE_DSR_HYBRID:-DENABLE_PREFILTER:-DENABLE_HOST_FIREWALL:-DENABLE_ICMP_RULE: \ - -DENABLE_IPV6:-DENCAP_IFINDEX:-DTUNNEL_MODE:-DPOLICY_VERDICT_NOTIFY:-DENABLE_NODEPORT:-DENABLE_NODEPORT_ACCELERATION:-DENABLE_DSR:-DENABLE_DSR_HYBRID:-DENABLE_PREFILTER:-DENABLE_HOST_FIREWALL:-DENABLE_ICMP_RULE:-DENABLE_SRV6:-DENABLE_MULTICAST:-DENCRYPTED_OVERLAY: \ - -DENABLE_IPV4:-DENABLE_IPV6:-DENCAP_IFINDEX:-DTUNNEL_MODE:-DPOLICY_VERDICT_NOTIFY:-DENABLE_NODEPORT:-DENABLE_NODEPORT_ACCELERATION:-DENABLE_DSR:-DENABLE_DSR_HYBRID:-DENABLE_PREFILTER:-DENABLE_HOST_FIREWALL:-DENABLE_ICMP_RULE:-DENABLE_SRV6:-DENABLE_MULTICAST:-DENCRYPTED_OVERLAY: \ - -DENABLE_IPV4:-DENABLE_IPV6:-DENCAP_IFINDEX:-DTUNNEL_MODE:-DPOLICY_VERDICT_NOTIFY:-DENABLE_NODEPORT:-DENABLE_NODEPORT_ACCELERATION:-DENABLE_DSR:-DENABLE_DSR_HYBRID:-DENABLE_PREFILTER:-DENABLE_HOST_FIREWALL:-DENABLE_ICMP_RULE:-DENABLE_SRV6:-DENABLE_SRV6_SRH_ENCAP:-DENABLE_SCTP:-DENABLE_MULTICAST:-DENCRYPTED_OVERLAY: \ - -DENABLE_IPV4:-DENABLE_IPV6:-DENCAP_IFINDEX:-DTUNNEL_MODE:-DPOLICY_VERDICT_NOTIFY:-DENABLE_NODEPORT:-DENABLE_NODEPORT_ACCELERATION:-DENABLE_DSR:-DENABLE_DSR_HYBRID:-DENABLE_PREFILTER:-DENABLE_HOST_FIREWALL:-DENABLE_ICMP_RULE:-DENABLE_SRV6:-DENABLE_SRV6_SRH_ENCAP:-DENABLE_SCTP:-DENABLE_VTEP:-DENABLE_MULTICAST:-DENCRYPTED_OVERLAY: \ - -DENABLE_IPV4:-DENABLE_IPV6:-DENCAP_IFINDEX:-DPOLICY_VERDICT_NOTIFY:-DENABLE_NODEPORT:-DENABLE_NODEPORT_ACCELERATION:-DENABLE_DSR:-DDSR_ENCAP_MODE:-DDSR_ENCAP_GENEVE:-DENABLE_PREFILTER:-DENABLE_HOST_FIREWALL:-DENABLE_ICMP_RULE:-DENABLE_SRV6:-DENABLE_SRV6_SRH_ENCAP:-DENCRYPTED_OVERLAY:-DENABLE_SCTP:-DENABLE_VTEP:-DDSR_ENCAP_IPIP=2 \ - -DENABLE_IPV4:-DENABLE_IPV6:-DENCAP_IFINDEX:-DPOLICY_VERDICT_NOTIFY:-DENABLE_NODEPORT:-DENABLE_NODEPORT_ACCELERATION:-DENABLE_DSR:-DENABLE_DSR_HYBRID:-DENABLE_PREFILTER:-DENABLE_HOST_FIREWALL:-DENABLE_ICMP_RULE:-DENABLE_SRV6:-DENABLE_SRV6_SRH_ENCAP:-DENCRYPTED_OVERLAY:-DENABLE_SCTP:-DENABLE_VTEP:-DENABLE_IPSEC: \ + 
-DENABLE_IPV6:-DENCAP_IFINDEX:-DTUNNEL_MODE:-DPOLICY_VERDICT_NOTIFY:-DENABLE_NODEPORT:-DENABLE_NODEPORT_ACCELERATION:-DENABLE_DSR:-DENABLE_DSR_HYBRID:-DENABLE_PREFILTER:-DENABLE_HOST_FIREWALL:-DENABLE_ICMP_RULE:-DENABLE_SRV6:-DENABLE_MULTICAST: \ + -DENABLE_IPV4:-DENABLE_IPV6:-DENCAP_IFINDEX:-DTUNNEL_MODE:-DPOLICY_VERDICT_NOTIFY:-DENABLE_NODEPORT:-DENABLE_NODEPORT_ACCELERATION:-DENABLE_DSR:-DENABLE_DSR_HYBRID:-DENABLE_PREFILTER:-DENABLE_HOST_FIREWALL:-DENABLE_ICMP_RULE:-DENABLE_SRV6:-DENABLE_MULTICAST: \ + -DENABLE_IPV4:-DENABLE_IPV6:-DENCAP_IFINDEX:-DTUNNEL_MODE:-DPOLICY_VERDICT_NOTIFY:-DENABLE_NODEPORT:-DENABLE_NODEPORT_ACCELERATION:-DENABLE_DSR:-DENABLE_DSR_HYBRID:-DENABLE_PREFILTER:-DENABLE_HOST_FIREWALL:-DENABLE_ICMP_RULE:-DENABLE_SRV6:-DENABLE_SRV6_SRH_ENCAP:-DENABLE_SCTP:-DENABLE_MULTICAST: \ + -DENABLE_IPV4:-DENABLE_IPV6:-DENCAP_IFINDEX:-DTUNNEL_MODE:-DPOLICY_VERDICT_NOTIFY:-DENABLE_NODEPORT:-DENABLE_NODEPORT_ACCELERATION:-DENABLE_DSR:-DENABLE_DSR_HYBRID:-DENABLE_PREFILTER:-DENABLE_HOST_FIREWALL:-DENABLE_ICMP_RULE:-DENABLE_SRV6:-DENABLE_SRV6_SRH_ENCAP:-DENABLE_SCTP:-DENABLE_VTEP:-DENABLE_MULTICAST: \ + -DENABLE_IPV4:-DENABLE_IPV6:-DENCAP_IFINDEX:-DPOLICY_VERDICT_NOTIFY:-DENABLE_NODEPORT:-DENABLE_NODEPORT_ACCELERATION:-DENABLE_DSR:-DDSR_ENCAP_MODE:-DDSR_ENCAP_GENEVE:-DENABLE_PREFILTER:-DENABLE_HOST_FIREWALL:-DENABLE_ICMP_RULE:-DENABLE_SRV6:-DENABLE_SRV6_SRH_ENCAP:-DENABLE_SCTP:-DENABLE_VTEP:-DDSR_ENCAP_IPIP=2 \ + -DENABLE_IPV4:-DENABLE_IPV6:-DENCAP_IFINDEX:-DPOLICY_VERDICT_NOTIFY:-DENABLE_NODEPORT:-DENABLE_NODEPORT_ACCELERATION:-DENABLE_DSR:-DENABLE_DSR_HYBRID:-DENABLE_PREFILTER:-DENABLE_HOST_FIREWALL:-DENABLE_ICMP_RULE:-DENABLE_SRV6:-DENABLE_SRV6_SRH_ENCAP:-DENABLE_SCTP:-DENABLE_VTEP:-DENABLE_IPSEC: \ ifndef MAX_HOST_OPTIONS MAX_HOST_OPTIONS = $(MAX_BASE_OPTIONS) -DENCAP_IFINDEX=1 -DTUNNEL_MODE=1 -DENABLE_IPSEC=1 diff --git a/bpf/Makefile.bpf b/bpf/Makefile.bpf index 9c0a3b3f7a899..f4bab4ee947ff 100644 --- a/bpf/Makefile.bpf +++ b/bpf/Makefile.bpf @@ -49,7 +49,7 @@ force: # TODO: revert addition of ignore MACRO_ARG_REUSE below once cilium-checkpatch # image is updated to ignore it. # -CHECKPATCH_IMAGE := quay.io/cilium/cilium-checkpatch:1755701578-b97bd7a@sha256:5c4df794425e29962aac25fd5c005fd39d737232121688ba8d08878f4815b232 +CHECKPATCH_IMAGE := quay.io/cilium/cilium-checkpatch:1755701578-b97bd7a@sha256:f1332fa6edbbd40882a59ceae4a7843a4095bd62288363740e84b82708624c50 ifneq ($(CHECKPATCH_DEBUG),) # Run script with "bash -x" CHECKPATCH_IMAGE_AND_ENTRY := \ diff --git a/bpf/bpf_alignchecker.c b/bpf/bpf_alignchecker.c index c52673397669e..1c2aac4a5cc51 100644 --- a/bpf/bpf_alignchecker.c +++ b/bpf/bpf_alignchecker.c @@ -71,6 +71,7 @@ add_type(struct egress_gw_policy_entry); add_type(struct egress_gw_policy_key6); add_type(struct egress_gw_policy_entry6); add_type(struct vtep_key); +add_type(struct vtep_policy_key); add_type(struct vtep_value); add_type(struct srv6_vrf_key4); add_type(struct srv6_vrf_key6); diff --git a/bpf/bpf_host.c b/bpf/bpf_host.c index 86d156773f8d7..8fe75baeb652e 100644 --- a/bpf/bpf_host.c +++ b/bpf/bpf_host.c @@ -60,6 +60,10 @@ #define host_wg_encrypt_hook(ctx, proto, src_sec_identity) \ wg_maybe_redirect_to_encrypt(ctx, proto, src_sec_identity) +#ifndef tcx_early_hook +#define tcx_early_hook(ctx, proto) CTX_ACT_OK +#endif + /* Bit 0 is skipped for robustness, as it's used in some places to indicate from_host itself. 
*/ #define FROM_HOST_FLAG_NEED_HOSTFW (1 << 1) #define FROM_HOST_FLAG_HOST_ID (1 << 2) @@ -347,14 +351,14 @@ handle_ipv6_cont(struct __ctx_buff *ctx, __u32 secctx, const bool from_host, return CTX_ACT_OK; #ifdef ENABLE_HOST_ROUTING - /* add L2 header for L2-less interface, such as cilium_wg0 */ - if (!from_host) { + /* add L2 header for L2-less interface: */ + if (!from_host && THIS_IS_L3_DEV) { bool l2_hdr_required = true; ret = maybe_add_l2_hdr(ctx, ep->ifindex, &l2_hdr_required); if (ret != 0) return ret; - if (l2_hdr_required && ETH_HLEN == 0) { + if (l2_hdr_required) { /* l2 header is added */ l3_off += __ETH_HLEN; } @@ -394,7 +398,7 @@ handle_ipv6_cont(struct __ctx_buff *ctx, __u32 secctx, const bool from_host, #if defined(ENABLE_IPSEC) && !defined(TUNNEL_MODE) if (from_proxy && !identity_is_cluster(info->sec_identity)) - ctx->mark = MARK_MAGIC_PROXY_TO_WORLD; + ctx->mark = MARK_MAGIC_SKIP_TPROXY; #endif /* ENABLE_IPSEC && !TUNNEL_MODE */ return CTX_ACT_OK; @@ -756,14 +760,14 @@ handle_ipv4_cont(struct __ctx_buff *ctx, __u32 secctx, const bool from_host, return CTX_ACT_OK; #ifdef ENABLE_HOST_ROUTING - /* add L2 header for L2-less interface, such as cilium_wg0 */ - if (!from_host) { + /* add L2 header for L2-less interface: */ + if (!from_host && THIS_IS_L3_DEV) { bool l2_hdr_required = true; ret = maybe_add_l2_hdr(ctx, ep->ifindex, &l2_hdr_required); if (ret != 0) return ret; - if (l2_hdr_required && ETH_HLEN == 0) { + if (l2_hdr_required) { /* l2 header is added */ l3_off += __ETH_HLEN; if (!____revalidate_data_pull(ctx, &data, &data_end, @@ -847,7 +851,7 @@ handle_ipv4_cont(struct __ctx_buff *ctx, __u32 secctx, const bool from_host, #if defined(ENABLE_IPSEC) && !defined(TUNNEL_MODE) if (from_proxy && !identity_is_cluster(info->sec_identity)) - ctx->mark = MARK_MAGIC_PROXY_TO_WORLD; + ctx->mark = MARK_MAGIC_SKIP_TPROXY; #endif /* ENABLE_IPSEC && !TUNNEL_MODE */ return CTX_ACT_OK; @@ -987,6 +991,10 @@ int handle_l2_announcement(struct __ctx_buff *ctx, struct ipv6hdr *ip6) int ret; __u64 time; + /* Announcing L2 addresses for a L3 device makes no sense: */ + if (THIS_IS_L3_DEV) + return CTX_ACT_OK; + time = config_get(RUNTIME_CONFIG_AGENT_LIVENESS); if (!time) return CTX_ACT_OK; @@ -1208,6 +1216,8 @@ int cil_from_netdev(struct __ctx_buff *ctx) __u32 src_id = UNKNOWN_ID; __be16 proto = 0; + check_and_store_ip_trace_id(ctx); + #ifdef ENABLE_NODEPORT_ACCELERATION __u32 flags = ctx_get_xfer(ctx, XFER_FLAGS); #endif @@ -1262,12 +1272,14 @@ int cil_from_netdev(struct __ctx_buff *ctx) * ignore the return value from do_decrypt. */ do_decrypt(ctx, proto); - if (ctx->mark == MARK_MAGIC_DECRYPT) + if ((ctx->mark & MARK_MAGIC_HOST_MASK) == MARK_MAGIC_DECRYPT) return CTX_ACT_OK; #endif + ret = tcx_early_hook(ctx, proto); + if (ret != CTX_ACT_OK) + return ret; return do_netdev(ctx, proto, UNKNOWN_ID, TRACE_FROM_NETWORK, false); - drop_err: return send_drop_notify_error(ctx, src_id, ret, METRIC_INGRESS); } @@ -1285,6 +1297,8 @@ int cil_from_host(struct __ctx_buff *ctx) __be16 proto = 0; __u32 magic; + check_and_store_ip_trace_id(ctx); + /* Traffic from the host ns going through cilium_host device must * not be subject to EDT rate-limiting. 
*/ @@ -1321,15 +1335,6 @@ int cil_from_host(struct __ctx_buff *ctx) if (magic == MARK_MAGIC_PROXY_INGRESS || magic == MARK_MAGIC_PROXY_EGRESS) obs_point = TRACE_FROM_PROXY; -#ifdef ENABLE_IPSEC - if (magic == MARK_MAGIC_ENCRYPT) { - send_trace_notify(ctx, TRACE_FROM_STACK, identity, UNKNOWN_ID, - TRACE_EP_ID_UNKNOWN, ctx->ingress_ifindex, - TRACE_REASON_ENCRYPTED, 0, proto); - return CTX_ACT_OK; - } -#endif /* ENABLE_IPSEC */ - return do_netdev(ctx, proto, identity, obs_point, true); } @@ -1353,6 +1358,7 @@ int cil_to_netdev(struct __ctx_buff *ctx) __s8 ext_err = 0; bpf_clear_meta(ctx); + check_and_store_ip_trace_id(ctx); if (magic == MARK_MAGIC_HOST || magic == MARK_MAGIC_OVERLAY || ctx_mark_is_encrypted(ctx)) src_sec_identity = HOST_ID; @@ -1365,6 +1371,17 @@ int cil_to_netdev(struct __ctx_buff *ctx) src_sec_identity = get_identity(ctx); #endif + /* Load the ethertype just once: */ + validate_ethertype(ctx, &proto); + +#ifdef ENABLE_IPSEC + if (magic == MARK_MAGIC_ENCRYPT) + send_trace_notify(ctx, TRACE_FROM_STACK, + ctx_load_meta(ctx, CB_ENCRYPT_IDENTITY), UNKNOWN_ID, + TRACE_EP_ID_UNKNOWN, ctx->ingress_ifindex, + TRACE_REASON_ENCRYPTED, 0, proto); +#endif /* ENABLE_IPSEC */ + /* Filter allowed vlan id's and pass them back to kernel. */ if (ctx->vlan_present) { @@ -1388,9 +1405,6 @@ int cil_to_netdev(struct __ctx_buff *ctx) } #endif - /* Load the ethertype just once: */ - validate_ethertype(ctx, &proto); - #ifdef ENABLE_HOST_FIREWALL /* This was initially added for Egress GW. There it's no longer needed, * but it potentially also helps other paths (LB-to-remote-backend ?). @@ -1677,6 +1691,8 @@ int cil_to_host(struct __ctx_buff *ctx) __u32 src_id = 0; __s8 ext_err = 0; + check_and_store_ip_trace_id(ctx); + /* Prefer ctx->mark when it is set to one of the expected values. * Also see https://github.com/cilium/cilium/issues/36329. */ @@ -1737,16 +1753,16 @@ int cil_to_host(struct __ctx_buff *ctx) * * This iptables rule, created by * iptables.Manager.inboundProxyRedirectRule() is ignored by the mark - * MARK_MAGIC_PROXY_TO_WORLD, in the control plane. + * MARK_MAGIC_SKIP_TPROXY, in the control plane. * Technically, it is also ignored by MARK_MAGIC_ENCRYPT but reusing * this mark breaks further processing as its used in the XFRM subsystem. * * Therefore, if the packet's mark is zero, indicating it was forwarded - * from 'cilium_host', mark the packet with MARK_MAGIC_PROXY_TO_WORLD + * from 'cilium_host', mark the packet with MARK_MAGIC_SKIP_TPROXY * and allow it to enter the foward path once punted to stack. 
*/ if (ctx->mark == 0 && THIS_INTERFACE_IFINDEX == CILIUM_NET_IFINDEX) - ctx->mark = MARK_MAGIC_PROXY_TO_WORLD; + ctx->mark = MARK_MAGIC_SKIP_TPROXY; #endif /* !TUNNEL_MODE */ # ifdef ENABLE_NODEPORT diff --git a/bpf/bpf_lxc.c b/bpf/bpf_lxc.c index 03f73252df3ad..a028c282a1ed1 100644 --- a/bpf/bpf_lxc.c +++ b/bpf/bpf_lxc.c @@ -127,14 +127,17 @@ static __always_inline int __per_packet_lb_svc_xlate_4(void *ctx, struct iphdr * l4_off, &key, &tuple, svc, &ct_state_new, false, &cluster_id, ext_err, ENDPOINT_NETNS_COOKIE); + if (IS_ERR(ret)) { + if (ret == DROP_NO_SERVICE) { + if (!CONFIG(enable_no_service_endpoints_routable)) + return handle_nonroutable_endpoints_v4(svc); #ifdef SERVICE_NO_BACKEND_RESPONSE - if (ret == DROP_NO_SERVICE) - ret = tail_call_internal(ctx, CILIUM_CALL_IPV4_NO_SERVICE, - ext_err); + ret = tail_call_internal(ctx, CILIUM_CALL_IPV4_NO_SERVICE, + ext_err); #endif - - if (IS_ERR(ret)) + } return ret; + } } skip_service_lookup: /* Store state to be picked up on the continuation tail call. */ @@ -200,14 +203,17 @@ static __always_inline int __per_packet_lb_svc_xlate_6(void *ctx, struct ipv6hdr l4_off, &key, &tuple, svc, &ct_state_new, false, ext_err, ENDPOINT_NETNS_COOKIE); + if (IS_ERR(ret)) { + if (ret == DROP_NO_SERVICE) { + if (!CONFIG(enable_no_service_endpoints_routable)) + return handle_nonroutable_endpoints_v6(svc); #ifdef SERVICE_NO_BACKEND_RESPONSE - if (ret == DROP_NO_SERVICE) - ret = tail_call_internal(ctx, CILIUM_CALL_IPV6_NO_SERVICE, - ext_err); + ret = tail_call_internal(ctx, CILIUM_CALL_IPV6_NO_SERVICE, + ext_err); #endif - - if (IS_ERR(ret)) + } return ret; + } } skip_service_lookup: @@ -1244,12 +1250,20 @@ static __always_inline int handle_ipv4_from_lxc(struct __ctx_buff *ctx, __u32 *d { struct remote_endpoint_info fake_info = {0}; struct vtep_key vkey = {}; + struct vtep_policy_key vpkey = {.prefixlen = 64, .src_ip = ip4->saddr, .dst_ip = ip4->daddr, }; struct vtep_value *vtep; vkey.vtep_ip = ip4->daddr & VTEP_MASK; vtep = map_lookup_elem(&cilium_vtep_map, &vkey); - if (!vtep) - goto skip_vtep; + if (!vtep) { + if (!info || info->sec_identity == WORLD_IPV4_ID) { + vtep = map_lookup_elem(&cilium_vtep_policy_map, &vpkey); + if (!vtep) + goto skip_vtep; + } else { + goto skip_vtep; + } + } if (vtep->vtep_mac && vtep->tunnel_endpoint) { if (eth_store_daddr(ctx, (__u8 *)&vtep->vtep_mac, 0) < 0) @@ -1515,6 +1529,7 @@ int cil_from_container(struct __ctx_buff *ctx) bool valid_ethertype = validate_ethertype(ctx, &proto); bpf_clear_meta(ctx); + check_and_store_ip_trace_id(ctx); /* Workaround for GH-18311 where veth driver might have recorded * veth's RX queue mapping instead of leaving it at 0. 
This can @@ -2388,6 +2403,7 @@ int cil_to_container(struct __ctx_buff *ctx) } bpf_clear_meta(ctx); + check_and_store_ip_trace_id(ctx); #if defined(ENABLE_L7_LB) if ((ctx->mark & MARK_MAGIC_HOST_MASK) == MARK_MAGIC_PROXY_EGRESS_EPID) { diff --git a/bpf/bpf_network.c b/bpf/bpf_network.c index 5b133e64feac8..b3c10dd0974c3 100644 --- a/bpf/bpf_network.c +++ b/bpf/bpf_network.c @@ -26,6 +26,7 @@ int cil_from_network(struct __ctx_buff *ctx) enum trace_point obs_point_from = TRACE_FROM_NETWORK; bpf_clear_meta(ctx); + check_and_store_ip_trace_id(ctx); #ifdef ENABLE_IPSEC /* This program should be attached to the tc-ingress of diff --git a/bpf/bpf_overlay.c b/bpf/bpf_overlay.c index 0c439d0e2f82a..3b1d9f1ad4cdd 100644 --- a/bpf/bpf_overlay.c +++ b/bpf/bpf_overlay.c @@ -29,7 +29,6 @@ #include "lib/tailcall.h" #include "lib/common.h" #include "lib/edt.h" -#include "lib/encrypt.h" #include "lib/eps.h" #include "lib/ipv6.h" #include "lib/eth.h" @@ -39,7 +38,6 @@ #include "lib/local_delivery.h" #include "lib/drop.h" #include "lib/identity.h" -#include "lib/node.h" #include "lib/nodeport.h" #include "lib/nodeport_egress.h" #include "lib/clustermesh.h" @@ -59,11 +57,9 @@ static __always_inline int handle_ipv6(struct __ctx_buff *ctx, __s8 *ext_err __maybe_unused) { int ret, l3_off = ETH_HLEN; - struct remote_endpoint_info *info; void *data_end, *data; struct ipv6hdr *ip6; struct endpoint_info *ep; - bool decrypted = false; bool __maybe_unused is_dsr = false; fraginfo_t fraginfo __maybe_unused; @@ -101,68 +97,26 @@ static __always_inline int handle_ipv6(struct __ctx_buff *ctx, if (!revalidate_data(ctx, &data, &data_end, &ip6)) return DROP_INVALID; - /* Lookup the source in the ipcache. Before decryption this will be the - * outer source IP to get the source node ID. After decryption this - * will be the inner source IP to get the source security identity. + /* Maybe overwrite the REMOTE_NODE_ID with + * KUBE_APISERVER_NODE_ID to support upgrade. After v1.12, + * identity_is_remote_node() should be removed. + * + * A packet that has DSR info and comes from `world` may have specific identity when + * a CNP that is using CIDR rules is applied. */ - info = lookup_ip6_remote_endpoint((union v6addr *)&ip6->saddr, 0); + if (identity_is_remote_node(*identity) || + (is_dsr && identity_is_world_ipv6(*identity))) { + struct remote_endpoint_info *info; -#ifdef ENABLE_IPSEC - decrypted = ((ctx->mark & MARK_MAGIC_HOST_MASK) == MARK_MAGIC_DECRYPT); -#endif - if (decrypted) { + info = lookup_ip6_remote_endpoint((union v6addr *)&ip6->saddr, 0); if (info) *identity = info->sec_identity; - - cilium_dbg(ctx, info ? DBG_IP_ID_MAP_SUCCEED6 : DBG_IP_ID_MAP_FAILED6, - ((__u32 *)&ip6->saddr)[3], *identity); - } else { - /* Maybe overwrite the REMOTE_NODE_ID with - * KUBE_APISERVER_NODE_ID to support upgrade. After v1.12, - * identity_is_remote_node() should be removed. - * - * A packet that has DSR info and comes from `world` may have specific identity when - * a CNP that is using CIDR rules is applied. 
- */ - if (info && (identity_is_remote_node(*identity) || - (is_dsr && identity_is_world_ipv6(*identity)))) - *identity = info->sec_identity; } #ifdef ENABLE_IPSEC - if (!decrypted) { - __u16 node_id; - - /* IPSec is not currently enforce (feature coming soon) - * so for now just handle normally - */ - if (ip6->nexthdr != IPPROTO_ESP) { - update_metrics(ctx_full_len(ctx), METRIC_INGRESS, - REASON_PLAINTEXT); - goto not_esp; - } - - node_id = lookup_ip6_node_id((union v6addr *)&ip6->saddr); - if (!node_id) - return DROP_NO_NODE_ID; - set_ipsec_decrypt_mark(ctx, node_id); - - /* To IPSec stack on cilium_vxlan we are going to pass - * this up the stack but eth_type_trans has already labeled - * this as an OTHERHOST type packet. To avoid being dropped - * by IP stack before IPSec can be processed mark as a HOST - * packet. - */ - ctx_change_type(ctx, PACKET_HOST); - - send_trace_notify(ctx, TRACE_TO_STACK, *identity, UNKNOWN_ID, - TRACE_EP_ID_UNKNOWN, ctx->ingress_ifindex, - TRACE_REASON_ENCRYPTED, 0, bpf_htons(ETH_P_IPV6)); - - return CTX_ACT_OK; - } - ctx->mark = 0; -not_esp: + if (ip6->nexthdr != IPPROTO_ESP) + update_metrics(ctx_full_len(ctx), METRIC_INGRESS, + REASON_PLAINTEXT); #endif #if defined(ENABLE_EGRESS_GATEWAY_COMMON) @@ -336,11 +290,9 @@ static __always_inline int handle_ipv4(struct __ctx_buff *ctx, __u32 *identity, __s8 *ext_err __maybe_unused) { - struct remote_endpoint_info *info; void *data_end, *data; struct iphdr *ip4; struct endpoint_info *ep; - bool decrypted = false; bool __maybe_unused is_dsr = false; fraginfo_t fraginfo __maybe_unused; int ret; @@ -389,101 +341,59 @@ static __always_inline int handle_ipv4(struct __ctx_buff *ctx, if (!revalidate_data(ctx, &data, &data_end, &ip4)) return DROP_INVALID; - /* Lookup the source in the ipcache. Before decryption this will be the - * outer source IP to get the source node ID. After decryption this - * will be the inner source IP to get the source security identity. - */ - info = lookup_ip4_remote_endpoint(ip4->saddr, 0); - -#ifdef ENABLE_IPSEC - decrypted = ((ctx->mark & MARK_MAGIC_HOST_MASK) == MARK_MAGIC_DECRYPT); -#endif - /* If packets are decrypted the key has already been pushed into metadata. */ - if (decrypted) { - if (info) - *identity = info->sec_identity; - - cilium_dbg(ctx, info ? DBG_IP_ID_MAP_SUCCEED4 : DBG_IP_ID_MAP_FAILED4, - ip4->saddr, *identity); - } else { #ifdef ENABLE_VTEP - { - struct vtep_key vkey = {}; - struct vtep_value *vtep; - - vkey.vtep_ip = ip4->saddr & VTEP_MASK; - vtep = map_lookup_elem(&cilium_vtep_map, &vkey); - if (!vtep) - goto skip_vtep; - if (vtep->tunnel_endpoint) { - if (!identity_is_world_ipv4(*identity)) - return DROP_INVALID_VNI; - } + { + struct vtep_key vkey = {}; + struct vtep_value *vtep; + + vkey.vtep_ip = ip4->saddr & VTEP_MASK; + vtep = map_lookup_elem(&cilium_vtep_map, &vkey); + if (!vtep) + goto skip_vtep; + if (vtep->tunnel_endpoint) { + if (!identity_is_world_ipv4(*identity)) + return DROP_INVALID_VNI; } + } skip_vtep: #endif #if defined(ENABLE_CLUSTER_AWARE_ADDRESSING) && defined(ENABLE_INTER_CLUSTER_SNAT) - { - __u32 cluster_id_from_identity = - extract_cluster_id_from_identity(*identity); - - /* When we see inter-cluster communication and if - * the destination is IPV4_INTER_CLUSTER_SNAT, try - * to perform revSNAT. We tailcall from here since - * we saw the complexity issue when we added this - * logic in-line. 
- */ - if (cluster_id_from_identity != 0 && - cluster_id_from_identity != CLUSTER_ID && - ip4->daddr == IPV4_INTER_CLUSTER_SNAT) { - ctx_store_meta(ctx, CB_SRC_LABEL, *identity); - return tail_call_internal(ctx, - CILIUM_CALL_IPV4_INTER_CLUSTER_REVSNAT, - ext_err); - } + { + __u32 cluster_id_from_identity = + extract_cluster_id_from_identity(*identity); + + /* When we see inter-cluster communication and if + * the destination is IPV4_INTER_CLUSTER_SNAT, try + * to perform revSNAT. We tailcall from here since + * we saw the complexity issue when we added this + * logic in-line. + */ + if (cluster_id_from_identity != 0 && + cluster_id_from_identity != CLUSTER_ID && + ip4->daddr == IPV4_INTER_CLUSTER_SNAT) { + ctx_store_meta(ctx, CB_SRC_LABEL, *identity); + return tail_call_internal(ctx, + CILIUM_CALL_IPV4_INTER_CLUSTER_REVSNAT, + ext_err); } + } #endif - /* See comment at equivalent code in handle_ipv6() */ - if (info && (identity_is_remote_node(*identity) || - (is_dsr && identity_is_world_ipv4(*identity)))) + + /* See comment at equivalent code in handle_ipv6() */ + if (identity_is_remote_node(*identity) || + (is_dsr && identity_is_world_ipv4(*identity))) { + struct remote_endpoint_info *info; + + info = lookup_ip4_remote_endpoint(ip4->saddr, 0); + if (info) *identity = info->sec_identity; } #ifdef ENABLE_IPSEC - if (!decrypted) { - __u16 node_id; - - /* IPSec is not currently enforce (feature coming soon) - * so for now just handle normally - */ - if (ip4->protocol != IPPROTO_ESP) { - update_metrics(ctx_full_len(ctx), METRIC_INGRESS, - REASON_PLAINTEXT); - goto not_esp; - } - - node_id = lookup_ip4_node_id(ip4->saddr); - if (!node_id) - return DROP_NO_NODE_ID; - set_ipsec_decrypt_mark(ctx, node_id); - - /* To IPSec stack on cilium_vxlan we are going to pass - * this up the stack but eth_type_trans has already labeled - * this as an OTHERHOST type packet. To avoid being dropped - * by IP stack before IPSec can be processed mark as a HOST - * packet. - */ - ctx_change_type(ctx, PACKET_HOST); - - send_trace_notify(ctx, TRACE_TO_STACK, *identity, UNKNOWN_ID, - TRACE_EP_ID_UNKNOWN, ctx->ingress_ifindex, - TRACE_REASON_ENCRYPTED, 0, bpf_htons(ETH_P_IP)); - - return CTX_ACT_OK; - } - ctx->mark = 0; -not_esp: + if (ip4->protocol != IPPROTO_ESP) + update_metrics(ctx_full_len(ctx), METRIC_INGRESS, + REASON_PLAINTEXT); #endif #if defined(ENABLE_EGRESS_GATEWAY_COMMON) @@ -660,12 +570,12 @@ int cil_from_overlay(struct __ctx_buff *ctx) { __u32 src_sec_identity = 0; __s8 ext_err = 0; - bool decrypted = false; __u16 proto; int ret; bpf_clear_meta(ctx); ctx_skip_nodeport_clear(ctx); + check_and_store_ip_trace_id(ctx); if (!validate_ethertype(ctx, &proto)) { /* Pass unknown traffic to the stack */ @@ -677,12 +587,9 @@ int cil_from_overlay(struct __ctx_buff *ctx) * * 1. ESP packets coming from overlay (encrypted and not marked) * 2. Non-ESP packets coming from overlay (plain and not marked) - * 3. Non-ESP packets coming from stack re-inserted by xfrm (plain - * and marked with MARK_MAGIC_DECRYPT. Only in IPSec mode.) * * 1. will be traced with TRACE_REASON_ENCRYPTED * 2. will be traced without TRACE_REASON_ENCRYPTED - * 3. will be traced without TRACE_REASON_ENCRYPTED * * Note that 1. contains the ESP packets someone else generated. * In that case, we trace it as "encrypted", but it doesn't mean @@ -692,9 +599,6 @@ int cil_from_overlay(struct __ctx_buff *ctx) * if the packets are ESP, because it doesn't matter for the * non-IPSec mode. 
*/ -#ifdef ENABLE_IPSEC - decrypted = ((ctx->mark & MARK_MAGIC_HOST_MASK) == MARK_MAGIC_DECRYPT); -#endif switch (proto) { #if defined(ENABLE_IPV4) || defined(ENABLE_IPV6) @@ -704,29 +608,29 @@ int cil_from_overlay(struct __ctx_buff *ctx) #ifdef ENABLE_IPV4 case bpf_htons(ETH_P_IP): #endif - /* If packets are decrypted the key has already been pushed into metadata. */ - if (!decrypted) { - struct bpf_tunnel_key key = {}; - - ret = get_tunnel_key(ctx, &key); - if (unlikely(ret < 0)) - goto out; - cilium_dbg(ctx, DBG_DECAP, key.tunnel_id, key.tunnel_label); - - src_sec_identity = get_id_from_tunnel_id(key.tunnel_id, proto); - - /* Any node encapsulating will map any HOST_ID source to be - * presented as REMOTE_NODE_ID, therefore any attempt to signal - * HOST_ID as source from a remote node can be dropped. - */ - if (src_sec_identity == HOST_ID) { - ret = DROP_INVALID_IDENTITY; - goto out; - } - - ctx_store_meta(ctx, CB_SRC_LABEL, src_sec_identity); + + { + struct bpf_tunnel_key key = {}; + + ret = get_tunnel_key(ctx, &key); + if (unlikely(ret < 0)) + goto out; + cilium_dbg(ctx, DBG_DECAP, key.tunnel_id, key.tunnel_label); + + src_sec_identity = get_id_from_tunnel_id(key.tunnel_id, proto); + + /* Any node encapsulating will map any HOST_ID source to be + * presented as REMOTE_NODE_ID, therefore any attempt to signal + * HOST_ID as source from a remote node can be dropped. + */ + if (src_sec_identity == HOST_ID) { + ret = DROP_INVALID_IDENTITY; + goto out; } - break; + + ctx_store_meta(ctx, CB_SRC_LABEL, src_sec_identity); + } + break; #endif /* ENABLE_IPV4 || ENABLE_IPV6 */ default: break; @@ -739,19 +643,9 @@ int cil_from_overlay(struct __ctx_buff *ctx) TRACE_REASON_ENCRYPTED, 0, proto); else #endif - { - enum trace_point obs_point = TRACE_FROM_OVERLAY; - - /* Non-ESP packet marked with MARK_MAGIC_DECRYPT is a packet - * re-inserted from the stack. 
- */ - if (decrypted) - obs_point = TRACE_FROM_STACK; - - send_trace_notify(ctx, obs_point, src_sec_identity, UNKNOWN_ID, + send_trace_notify(ctx, TRACE_FROM_OVERLAY, src_sec_identity, UNKNOWN_ID, TRACE_EP_ID_UNKNOWN, ctx->ingress_ifindex, TRACE_REASON_UNKNOWN, TRACE_PAYLOAD_LEN, proto); - } switch (proto) { case bpf_htons(ETH_P_IPV6): @@ -803,6 +697,7 @@ int cil_to_overlay(struct __ctx_buff *ctx) __s8 ext_err = 0; bpf_clear_meta(ctx); + check_and_store_ip_trace_id(ctx); /* Load the ethertype just once: */ validate_ethertype(ctx, &proto); @@ -836,12 +731,8 @@ int cil_to_overlay(struct __ctx_buff *ctx) if (!ctx_get_tunnel_key(ctx, &tunnel_key, TUNNEL_KEY_WITHOUT_SRC_IP, 0)) src_sec_identity = get_id_from_tunnel_id(tunnel_key.tunnel_id, ctx_get_protocol(ctx)); -#ifdef ENABLE_IPSEC - if (is_esp(ctx, proto)) - set_identity_mark(ctx, src_sec_identity, MARK_MAGIC_OVERLAY_ENCRYPTED); - else -#endif - set_identity_mark(ctx, src_sec_identity, MARK_MAGIC_OVERLAY); + + set_identity_mark(ctx, src_sec_identity, MARK_MAGIC_OVERLAY); #ifdef ENABLE_NODEPORT if (snat_done) { diff --git a/bpf/bpf_sock.c b/bpf/bpf_sock.c index 2f495940d367e..9c19c73813cd5 100644 --- a/bpf/bpf_sock.c +++ b/bpf/bpf_sock.c @@ -5,6 +5,7 @@ #include #include +#include #include #define SKIP_CALLS_MAP 1 @@ -284,8 +285,18 @@ static __always_inline int __sock4_xlate_fwd(struct bpf_sock_addr *ctx, svc = sock4_wildcard_lookup_full(&key, in_hostns); if (!svc) return -ENXIO; - if (svc->count == 0 && !lb4_svc_is_l7_loadbalancer(svc)) - return -EHOSTUNREACH; + if (svc->count == 0 && !lb4_svc_is_l7_loadbalancer(svc)) { + /* Drop packet when service has no endpoints when this flag is enabled (default) */ + if (CONFIG(enable_no_service_endpoints_routable)) + return -EHOSTUNREACH; + /* Also drop the packet when eTP/iTP is set to Local, allow otherwise. */ + if ((lb4_svc_is_external(svc) && (svc->flags & SVC_FLAG_EXT_LOCAL_SCOPE)) || + (!lb4_svc_is_external(svc) && + (svc->flags2 & SVC_FLAG_INT_LOCAL_SCOPE))) { + return -EHOSTUNREACH; + } + return 0; + } send_trace_sock_notify4(ctx_full, XLATE_PRE_DIRECTION_FWD, dst_ip, bpf_ntohs(dst_port), is_connect); @@ -994,8 +1005,18 @@ static __always_inline int __sock6_xlate_fwd(struct bpf_sock_addr *ctx, svc = sock6_wildcard_lookup_full(&key, in_hostns); if (!svc) return sock6_xlate_v4_in_v6(ctx, udp_only, is_connect); - if (svc->count == 0 && !lb6_svc_is_l7_loadbalancer(svc)) - return -EHOSTUNREACH; + if (svc->count == 0 && !lb6_svc_is_l7_loadbalancer(svc)) { + /* Drop packet when service has no endpoints when this flag is enabled (default) */ + if (CONFIG(enable_no_service_endpoints_routable)) + return -EHOSTUNREACH; + /* Also drop the packet when eTP/iTP is set to Local, allow otherwise. 
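For reference, a minimal consolidated sketch of the backend-less service decision that both the IPv4 and IPv6 socket-LB hunks apply, assuming the helpers and flags introduced elsewhere in this diff (lb4_svc_is_external(), SVC_FLAG_EXT_LOCAL_SCOPE, SVC_FLAG_INT_LOCAL_SCOPE, CONFIG(enable_no_service_endpoints_routable)); the wrapper name is hypothetical:

static __always_inline int
sock4_no_backend_verdict(const struct lb4_service *svc)
{
	/* Caller has already established svc->count == 0 and that the
	 * service is not an L7 load balancer.
	 */

	/* Flag set (default): keep the pre-existing behaviour and fail the
	 * socket operation outright.
	 */
	if (CONFIG(enable_no_service_endpoints_routable))
		return -EHOSTUNREACH;

	/* Flag unset: only fail when the applicable traffic policy is Local,
	 * i.e. externalTrafficPolicy for external VIPs (ExternalIP,
	 * LoadBalancer, NodePort) or internalTrafficPolicy for ClusterIP.
	 */
	if ((lb4_svc_is_external(svc) && (svc->flags & SVC_FLAG_EXT_LOCAL_SCOPE)) ||
	    (!lb4_svc_is_external(svc) && (svc->flags2 & SVC_FLAG_INT_LOCAL_SCOPE)))
		return -EHOSTUNREACH;

	/* Otherwise skip translation and let the connection proceed. */
	return 0;
}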
*/ + if ((lb6_svc_is_external(svc) && (svc->flags & SVC_FLAG_EXT_LOCAL_SCOPE)) || + (!lb6_svc_is_external(svc) && + (svc->flags2 & SVC_FLAG_INT_LOCAL_SCOPE))) { + return -EHOSTUNREACH; + } + return 0; + } send_trace_sock_notify6(ctx, XLATE_PRE_DIRECTION_FWD, &key.address, bpf_ntohs(dst_port), is_connect); diff --git a/bpf/bpf_wireguard.c b/bpf/bpf_wireguard.c index 37a0fc044a892..b6bc587c782f7 100644 --- a/bpf/bpf_wireguard.c +++ b/bpf/bpf_wireguard.c @@ -21,7 +21,6 @@ #include "lib/common.h" #include "lib/ipv6.h" #include "lib/ipv4.h" -#include "lib/eth.h" #include "lib/dbg.h" #include "lib/trace.h" #include "lib/l3.h" @@ -87,13 +86,13 @@ handle_ipv6(struct __ctx_buff *ctx, __u32 identity, __s8 *ext_err __maybe_unused } #endif - if (!revalidate_data(ctx, &data, &data_end, &ip6)) - return DROP_INVALID; - #ifndef ENABLE_HOST_ROUTING return TC_ACT_OK; #endif + if (!revalidate_data(ctx, &data, &data_end, &ip6)) + return DROP_INVALID; + ep = lookup_ip6_endpoint(ip6); if (ep && !(ep->flags & ENDPOINT_MASK_HOST_DELIVERY)) { int l3_off = ETH_HLEN; @@ -105,13 +104,8 @@ handle_ipv6(struct __ctx_buff *ctx, __u32 identity, __s8 *ext_err __maybe_unused ret = maybe_add_l2_hdr(ctx, ep->ifindex, &l2_hdr_required); if (ret != 0) return ret; - if (l2_hdr_required) { + if (l2_hdr_required) l3_off += __ETH_HLEN; - if (!____revalidate_data_pull(ctx, &data, &data_end, - (void **)&ip6, sizeof(*ip6), - false, l3_off)) - return DROP_INVALID; - } #endif return ipv6_local_delivery(ctx, l3_off, identity, MARK_MAGIC_IDENTITY, ep, @@ -196,9 +190,6 @@ handle_ipv4(struct __ctx_buff *ctx, __u32 identity, __s8 *ext_err __maybe_unused } #endif - if (!revalidate_data(ctx, &data, &data_end, &ip4)) - return DROP_INVALID; - #ifndef ENABLE_HOST_ROUTING /* Without bpf_redirect_neigh() helper, we cannot redirect a * packet to a local endpoint in the direct routing mode, as @@ -212,6 +203,9 @@ handle_ipv4(struct __ctx_buff *ctx, __u32 identity, __s8 *ext_err __maybe_unused return TC_ACT_OK; #endif + if (!revalidate_data(ctx, &data, &data_end, &ip4)) + return DROP_INVALID; + /* Lookup IPv4 address in list of local endpoints and host IPs */ ep = lookup_ip4_endpoint(ip4); if (ep && !(ep->flags & ENDPOINT_MASK_HOST_DELIVERY)) { @@ -269,15 +263,12 @@ int cil_from_wireguard(struct __ctx_buff *ctx) int __maybe_unused ret; __u32 __maybe_unused identity = UNKNOWN_ID; __s8 __maybe_unused ext_err = 0; - __u16 proto = 0; + __u16 proto = ctx_get_protocol(ctx); ctx_skip_nodeport_clear(ctx); - /* Pass unknown traffic to the stack */ - if (!validate_ethertype(ctx, &proto)) - return TC_ACT_OK; - bpf_clear_meta(ctx); + check_and_store_ip_trace_id(ctx); switch (proto) { #ifdef ENABLE_IPV6 @@ -349,6 +340,7 @@ int cil_to_wireguard(struct __ctx_buff *ctx) src_sec_identity = get_identity(ctx); bpf_clear_meta(ctx); + check_and_store_ip_trace_id(ctx); #ifdef ENABLE_NODEPORT if (magic == MARK_MAGIC_OVERLAY) diff --git a/bpf/bpf_xdp.c b/bpf/bpf_xdp.c index 12a0ebea9fb23..4b52dab572666 100644 --- a/bpf/bpf_xdp.c +++ b/bpf/bpf_xdp.c @@ -362,6 +362,7 @@ static __always_inline int check_filters(struct __ctx_buff *ctx) __section_entry int cil_xdp_entry(struct __ctx_buff *ctx) { + check_and_store_ip_trace_id(ctx); return check_filters(ctx); } diff --git a/bpf/include/bpf/config/node.h b/bpf/include/bpf/config/node.h index dc017e814198b..1e1b64bbb2364 100644 --- a/bpf/include/bpf/config/node.h +++ b/bpf/include/bpf/config/node.h @@ -29,3 +29,5 @@ NODE_CONFIG(__u32, direct_routing_dev_ifindex, "Index of the interface used to c NODE_CONFIG(bool, 
supports_fib_lookup_skip_neigh, "Whether or not BPF_FIB_LOOKUP_SKIP_NEIGH is supported.") + +NODE_CONFIG(__u8, tracing_ip_option_type, "The IP option type to use for packet tracing") diff --git a/bpf/include/bpf/config/sock.h b/bpf/include/bpf/config/sock.h new file mode 100644 index 0000000000000..6004da486bb3e --- /dev/null +++ b/bpf/include/bpf/config/sock.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright Authors of Cilium */ + +/* This configuration data is specific to socketLB (bpf_sock.c). Do not + * import into any other program. + */ + +#pragma once + +#include + +DECLARE_CONFIG(bool, enable_no_service_endpoints_routable, + "Enable routes when service has 0 endpoints") diff --git a/bpf/lib/classifiers.h b/bpf/lib/classifiers.h index 2cdf3d63ff685..0e30927bc8527 100644 --- a/bpf/lib/classifiers.h +++ b/bpf/lib/classifiers.h @@ -122,7 +122,7 @@ ctx_classify(struct __ctx_buff *ctx, __be16 proto, enum trace_point obs_point __ proto = ctx_get_protocol(ctx); /* Check whether the packet comes from a L3 device (no ethernet). */ - if (ETH_HLEN == 0) + if (THIS_IS_L3_DEV) flags |= CLS_FLAG_L3_DEV; /* Check if IPv6 packet. */ diff --git a/bpf/lib/common.h b/bpf/lib/common.h index 57e0cdb6239b6..c5407720850e1 100644 --- a/bpf/lib/common.h +++ b/bpf/lib/common.h @@ -105,6 +105,8 @@ union v6addr { #define d2 d.d2 } __packed; +#define THIS_IS_L3_DEV (ETH_HLEN == 0) + static __always_inline bool validate_ethertype_l2_off(struct __ctx_buff *ctx, int l2_off, __u16 *proto) { @@ -113,7 +115,7 @@ static __always_inline bool validate_ethertype_l2_off(struct __ctx_buff *ctx, void *data = ctx_data(ctx); struct ethhdr *eth; - if (ETH_HLEN == 0) { + if (THIS_IS_L3_DEV) { /* The packet is received on L2-less device. Determine L3 * protocol from skb->protocol. */ @@ -555,7 +557,7 @@ enum metric_dir { * In the IPsec case this becomes the SPI on the wire. */ #define MARK_MAGIC_HOST_MASK 0x0F00 -#define MARK_MAGIC_PROXY_TO_WORLD 0x0800 +#define MARK_MAGIC_SKIP_TPROXY 0x0800 #define MARK_MAGIC_PROXY_EGRESS_EPID 0x0900 /* mark carries source endpoint ID */ #define MARK_MAGIC_PROXY_INGRESS 0x0A00 #define MARK_MAGIC_PROXY_EGRESS 0x0B00 @@ -566,13 +568,6 @@ enum metric_dir { #define MARK_MAGIC_TO_PROXY 0x0200 #define MARK_MAGIC_SNAT_DONE 0x0300 #define MARK_MAGIC_OVERLAY 0x0400 /* mark carries identity */ -/* used to indicate encrypted traffic was tunnel encapsulated - * this is useful in the IPsec code paths where we need to know if overlay - * traffic is encrypted or not. - * - * the SPI bit can be reused since this magic mark is only used POST encryption. - */ -#define MARK_MAGIC_OVERLAY_ENCRYPTED (MARK_MAGIC_OVERLAY | 0x1000) #define MARK_MAGIC_EGW_DONE 0x0500 /* mark carries identity */ #define MARK_MAGIC_KEY_MASK 0xFF00 diff --git a/bpf/lib/drop.h b/bpf/lib/drop.h index 0065a155ecfeb..20a7c71c1fada 100644 --- a/bpf/lib/drop.h +++ b/bpf/lib/drop.h @@ -21,8 +21,9 @@ #include "ratelimit.h" #include "tailcall.h" #include "classifiers.h" +#include "trace_helpers.h" -#define NOTIFY_DROP_VER 2 +#define NOTIFY_DROP_VER 3 struct drop_notify { NOTIFY_CAPTURE_HDR @@ -38,6 +39,7 @@ struct drop_notify { * move to flags_lower/flags_upper). */ __u8 pad2[3]; + __u64 ip_trace_id; }; #ifdef DROP_NOTIFY @@ -63,6 +65,7 @@ int tail_drop_notify(struct __ctx_buff *ctx) { /* Mask needed to calm verifier. 
*/ __u32 error = ctx_load_meta(ctx, 2) & 0xFFFFFFFF; + __u64 ip_trace_id = load_ip_trace_id(); __u64 ctx_len = ctx_full_len(ctx); __u64 cap_len; __u32 meta4 = ctx_load_meta(ctx, 4); @@ -99,6 +102,7 @@ int tail_drop_notify(struct __ctx_buff *ctx) .ext_error = (__s8)(__u8)(error >> 8), .ifindex = ctx_get_ifindex(ctx), .flags = flags, + .ip_trace_id = ip_trace_id, }; ctx_event_output(ctx, &cilium_events, diff --git a/bpf/lib/encrypt.h b/bpf/lib/encrypt.h index 08b0f2a50228d..2e8f02e9f4250 100644 --- a/bpf/lib/encrypt.h +++ b/bpf/lib/encrypt.h @@ -113,16 +113,12 @@ set_ipsec_encrypt(struct __ctx_buff *ctx, struct remote_endpoint_info *info, static __always_inline int do_decrypt(struct __ctx_buff *ctx, __u16 proto) { + struct ipv6hdr __maybe_unused *ip6; + struct iphdr __maybe_unused *ip4; void *data, *data_end; __u8 protocol = 0; __u16 node_id = 0; bool decrypted; -#ifdef ENABLE_IPV6 - struct ipv6hdr *ip6; -#endif -#ifdef ENABLE_IPV4 - struct iphdr *ip4; -#endif decrypted = ((ctx->mark & MARK_MAGIC_HOST_MASK) == MARK_MAGIC_DECRYPT); @@ -224,20 +220,6 @@ ipsec_maybe_redirect_to_encrypt(struct __ctx_buff *ctx, __be16 proto, if (!eth_is_supported_ethertype(proto)) return DROP_UNSUPPORTED_L2; - /* if we are in tunnel mode the overlay prog can detect if the packet - * was already encrypted before encapsulation. - * - * if it was, we can simply short-circuit here and return, no encryption - * is required - * - * this would only be the case when transitioning from v1.17 -> v1.18 - * and can be removed on v1.19 release. - */ -# if defined(TUNNEL_MODE) - if (ctx_is_overlay_encrypted(ctx)) - return CTX_ACT_OK; -# endif /* TUNNEL_MODE */ - switch (proto) { # ifdef ENABLE_IPV4 case bpf_htons(ETH_P_IP): @@ -256,9 +238,6 @@ ipsec_maybe_redirect_to_encrypt(struct __ctx_buff *ctx, __be16 proto, * set_ipsec_encrypt to obtain the correct node ID and spi. */ if (ctx_is_overlay(ctx)) { - /* NOTE: we confirm double-encryption will not occur - * above in the `ctx_is_overlay_encrypted` check - */ fake_info.tunnel_endpoint.ip4 = ip4->daddr; fake_info.flag_has_tunnel_ep = true; @@ -292,9 +271,6 @@ ipsec_maybe_redirect_to_encrypt(struct __ctx_buff *ctx, __be16 proto, /* See comment in IPv4 case. */ if (ctx_is_overlay(ctx)) { - /* NOTE: we confirm double-encryption will not occur - * above in the `ctx_is_overlay_encrypted` check - */ ipv6_addr_copy_unaligned(&fake_info.tunnel_endpoint.ip6, (union v6addr *)&ip6->daddr); fake_info.flag_has_tunnel_ep = true; diff --git a/bpf/lib/fib.h b/bpf/lib/fib.h index c03c7f23e3f11..822a7497e4ad3 100644 --- a/bpf/lib/fib.h +++ b/bpf/lib/fib.h @@ -33,7 +33,7 @@ maybe_add_l2_hdr(struct __ctx_buff *ctx __maybe_unused, * skip L2 addr settings. */ *l2_hdr_required = false; - } else if (ETH_HLEN == 0) { + } else if (THIS_IS_L3_DEV) { /* The packet is going to be redirected from L3 to L2 * device, so we need to create L2 header first. 
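As a rough illustration of the pattern shared by the call sites that switch from ETH_HLEN == 0 to THIS_IS_L3_DEV, assuming the macro added to common.h in this diff and the existing add_l2_hdr() helper:

	/* On an L2-less (L3) device the packet has no Ethernet header, so a
	 * dummy one must be pushed before it can be redirected to an L2
	 * interface or encapsulated.
	 */
	if (THIS_IS_L3_DEV) {
		int ret = add_l2_hdr(ctx);

		if (ret != 0)
			return ret;
	}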
*/ diff --git a/bpf/lib/icmp6.h b/bpf/lib/icmp6.h index 25e870613b915..085f5b5d4de64 100644 --- a/bpf/lib/icmp6.h +++ b/bpf/lib/icmp6.h @@ -123,7 +123,7 @@ int icmp6_send_ndisc_adv(struct __ctx_buff *ctx, int nh_off, __u8 opts[8], opts_old[8]; const int csum_off = nh_off + ICMP6_CSUM_OFFSET; union v6addr target_ip; - __be32 sum; + __be32 sum = 0; /* * According to RFC4861, sections 4.3 and 7.2.2 unicast neighbour @@ -138,8 +138,15 @@ int icmp6_send_ndisc_adv(struct __ctx_buff *ctx, int nh_off, */ if (ctx_load_bytes(ctx, nh_off + ICMP6_ND_OPTS, opts_old, sizeof(opts_old)) < 0) { + __be32 plen_old = 0; + __be32 plen = bpf_htonl(8); + if (icmp6_ndisc_adv_addopt(ctx) < 0) return DROP_INVALID; + + /* Account for the pseudoheader change in payload length */ + sum = csum_diff(&plen_old, sizeof(__be32), &plen, + sizeof(__be32), 0); } if (ctx_load_bytes(ctx, nh_off + sizeof(struct ipv6hdr), &icmp6hdr_old, @@ -171,11 +178,9 @@ int icmp6_send_ndisc_adv(struct __ctx_buff *ctx, int nh_off, sizeof(icmp6hdr), 0) < 0) return DROP_WRITE_ERROR; - /* fixup checksums */ - sum = csum_diff(&icmp6hdr_old, sizeof(icmp6hdr_old), - &icmp6hdr, sizeof(icmp6hdr), 0); - if (l4_csum_replace(ctx, csum_off, 0, sum, BPF_F_PSEUDO_HDR) < 0) - return DROP_CSUM_L4; + /* ICMPv6 header has changed */ + sum = csum_diff(&icmp6hdr_old, sizeof(icmp6hdr_old), &icmp6hdr, + sizeof(icmp6hdr), sum); /* get old options */ if (ctx_load_bytes(ctx, nh_off + ICMP6_ND_OPTS, opts_old, @@ -195,8 +200,8 @@ int icmp6_send_ndisc_adv(struct __ctx_buff *ctx, int nh_off, if (ctx_store_bytes(ctx, nh_off + ICMP6_ND_OPTS, opts, sizeof(opts), 0) < 0) return DROP_WRITE_ERROR; - /* fixup checksum */ - sum = csum_diff(opts_old, sizeof(opts_old), opts, sizeof(opts), 0); + /* Options have changed */ + sum = csum_diff(opts_old, sizeof(opts_old), opts, sizeof(opts), sum); if (l4_csum_replace(ctx, csum_off, 0, sum, BPF_F_PSEUDO_HDR) < 0) return DROP_CSUM_L4; diff --git a/bpf/lib/identity.h b/bpf/lib/identity.h index 13be62779c897..f00938a9e3e43 100644 --- a/bpf/lib/identity.h +++ b/bpf/lib/identity.h @@ -181,10 +181,6 @@ static __always_inline __u32 inherit_identity_from_host(struct __ctx_buff *ctx, *identity = get_identity(ctx); } else if (magic == MARK_MAGIC_HOST) { *identity = HOST_ID; -#ifdef ENABLE_IPSEC - } else if (magic == MARK_MAGIC_ENCRYPT) { - *identity = ctx_load_meta(ctx, CB_ENCRYPT_IDENTITY); -#endif } else { #if defined ENABLE_IPV4 && defined ENABLE_IPV6 __u16 proto = ctx_get_protocol(ctx); diff --git a/bpf/lib/ip_options.h b/bpf/lib/ip_options.h new file mode 100644 index 0000000000000..4ee6b5e1a88f5 --- /dev/null +++ b/bpf/lib/ip_options.h @@ -0,0 +1,190 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright Authors of Cilium */ + +#include "common.h" + +/* Length of the initial supported IPv4 trace_id option (4 bytes). + * IPv4 IP options consist of 2 fixed bytes for the type and length, + * followed by a variable-length data field. An option length of 4 bytes + * indicates 2 fixed bytes for the type and length fields, and 2 bytes of + * ip_trace_id data. + */ +#define OPT16_LEN 4 + +/* Length of the second supported IPv4 trace_id option (6 bytes). + * Indicates 4 bytes of ip_trace_id data. + */ +#define OPT32_LEN 6 + +/* Length of the third supported IPv4 trace_id option (10 bytes). + * Indicates 8 bytes of ip_trace_id data. + */ +#define OPT64_LEN 10 + +/* Maximum number of IPv4 options to process. */ +#define MAX_IPV4_OPTS 3 + +/* The minimum value for IHL which corresponds to a packet with no options. 
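A quick worked example of the relationship this constant captures (the IHL field counts the header length in 4-byte words), shown here only as a sketch:

	/* 20-byte header, no options:      IHL = 20 / 4 = 5  (IHL_WITH_NO_OPTS)
	 * header with 8 bytes of options:  IHL = (20 + 8) / 4 = 7
	 * option bytes implied by the IHL: (ihl - IHL_WITH_NO_OPTS) * 4
	 */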
+ * + * A standard IP packet header has 20 bytes and the IHL is the number of 32 byte + * words. + */ +#define IHL_WITH_NO_OPTS 5 + +/* Signifies that options were parsed correctly but no trace ID was found. */ +#define TRACE_ID_NOT_FOUND 0 + +/* Signifies a failure to determine the trace ID based on an unspecified error. */ +#define TRACE_ID_ERROR -1 + +/* Signifies that the trace ID was found but it was invalid. */ +#define TRACE_ID_INVALID -2 + +/* Signifies a failure to determine trace ID because the IP family was not found. */ +#define TRACE_ID_NO_FAMILY -3 + +/* Signifies a failure to determine trace ID because the IP option length was + * not supported. + */ +#define TRACE_ID_UNSUPPORTED_LENGTH_ERROR -4 + +/* Signifies trace points which are being ignored because they're in IPv6 + * code and not supported yet. + */ +#define TRACE_ID_SKIP_IPV6 -100 + +/* trace_id_from_ip4 parses the IP options and returns the trace ID. + * + * See trace_id_from_ctx for more info. + */ +static __always_inline int +trace_id_from_ip4(struct __ctx_buff *ctx, __s64 *value, + const struct iphdr *ip4, + __u8 trace_ip_opt_type) +{ + __u8 opt_type; + __u32 offset; + __u8 optlen; + __u32 end; + int i; + + if (ip4->ihl <= IHL_WITH_NO_OPTS) + return TRACE_ID_NOT_FOUND; + + offset = ETH_HLEN + sizeof(struct iphdr); + end = offset + (ip4->ihl << 2); + +#pragma unroll(MAX_IPV4_OPTS) + for (i = 0; i < MAX_IPV4_OPTS && offset < end; i++) { + /* We load the option header 1 field at a time since different types + * have different formats. + * + * "Options 0 and 1 are exactly one octet which is their type field. All + * other options have their one octet type field, followed by a one + * octet length field, followed by length-2 octets of option data." + * + * Ref: https://www.iana.org/assignments/ip-parameters/ip-parameters.xhtml + */ + + if (ctx_load_bytes(ctx, offset, &opt_type, 1) < 0) + return TRACE_ID_ERROR; + + if (opt_type == IPOPT_END) + break; + + if (opt_type == IPOPT_NOOP) { + offset++; + continue; + } + + if (ctx_load_bytes(ctx, offset + 1, &optlen, 1) < 0) + return TRACE_ID_ERROR; + + if (opt_type != trace_ip_opt_type) { + offset += optlen; + continue; + } + + if (optlen != OPT16_LEN && optlen != OPT32_LEN && optlen != OPT64_LEN) + return TRACE_ID_INVALID; + + switch (optlen) { + case OPT16_LEN: { + __s16 temp; + + if (ctx_load_bytes(ctx, offset + 2, &temp, sizeof(temp)) < 0) + return TRACE_ID_ERROR; + *value = bpf_ntohs(temp); + return 0; + } + case OPT32_LEN: { + __s32 temp; + + if (ctx_load_bytes(ctx, offset + 2, &temp, sizeof(temp)) < 0) + return TRACE_ID_ERROR; + *value = bpf_ntohl(temp); + return 0; + } + case OPT64_LEN: { + __s64 temp; + + if (ctx_load_bytes(ctx, offset + 2, &temp, sizeof(temp)) < 0) + return TRACE_ID_ERROR; + *value = __bpf_be64_to_cpu(temp); + return 0; + } + default: + return TRACE_ID_UNSUPPORTED_LENGTH_ERROR; + } + } + return TRACE_ID_NOT_FOUND; +} + +/* + * Parses the context to extract the trace ID from the IP options. + * + * Arguments: + * - ctx: The context buffer from which the IP options will be read. + * - value: A pointer to an __s64 where the resulting trace ID will be stored. + * - ip_opt_type_value: The type value of the IP option that contains the trace ID. + * + * Prerequisites: + * - Supports reading a trace ID embedded in IP options with lengths of 2, 4, or 8 bytes. + * - No support for trace_ids that are not 2, 4, or 8 bytes. + * + * Outputs: + * - Returns 0 if the trace ID is found. + * - Returns TRACE_ID_NOT_FOUND if no trace ID is found in the options. 
+ * - Returns TRACE_ID_INVALID if the found trace ID is invalid (e.g., non-positive). + * - Returns TRACE_ID_ERROR if there is an error during parsing. + * - Returns TRACE_ID_NO_FAMILY if the packet is not IPv4. + * - Returns TRACE_ID_SKIP_IPV6 if the packet is IPv6. + */ +static __always_inline int +trace_id_from_ctx(struct __ctx_buff *ctx, __s64 *value, __u8 ip_opt_type_value) +{ + void *data, *data_end; + __s64 trace_id = 0; + struct iphdr *ip4; + __u16 proto; + int ret; + + if (!validate_ethertype(ctx, &proto)) + return TRACE_ID_ERROR; + + if (proto == bpf_htons(ETH_P_IPV6)) + return TRACE_ID_SKIP_IPV6; + + if (proto != bpf_htons(ETH_P_IP)) + return TRACE_ID_NO_FAMILY; + + if (!revalidate_data(ctx, &data, &data_end, &ip4)) + return TRACE_ID_ERROR; + + ret = trace_id_from_ip4(ctx, &trace_id, ip4, ip_opt_type_value); + if (IS_ERR(ret)) + return ret; + + *value = trace_id; + return 0; +} diff --git a/bpf/lib/lb.h b/bpf/lib/lb.h index 332616af3ea25..984caed2ad000 100644 --- a/bpf/lib/lb.h +++ b/bpf/lib/lb.h @@ -398,6 +398,18 @@ bool lb6_svc_is_l7_loadbalancer(const struct lb6_service *svc __maybe_unused) #endif } +static __always_inline +bool lb4_svc_is_external(const struct lb4_service *svc __maybe_unused) +{ + return svc->flags & (SVC_FLAG_EXTERNAL_IP | SVC_FLAG_LOADBALANCER | SVC_FLAG_NODEPORT); +} + +static __always_inline +bool lb6_svc_is_external(const struct lb6_service *svc __maybe_unused) +{ + return svc->flags & (SVC_FLAG_EXTERNAL_IP | SVC_FLAG_LOADBALANCER | SVC_FLAG_NODEPORT); +} + static __always_inline int reverse_map_l4_port(struct __ctx_buff *ctx, __u8 nexthdr, __be16 old_port, __be16 port, int l4_off, struct csum_offset *csum_off) @@ -2420,3 +2432,29 @@ __wsum icmp_wsum_accumulate(void *data_start, void *data_end, int sample_len) } #endif /* SERVICE_NO_BACKEND_RESPONSE */ + +static __always_inline +int handle_nonroutable_endpoints_v4(struct lb4_service *svc) +{ + if ((lb4_svc_is_external(svc) && + (svc->flags & SVC_FLAG_EXT_LOCAL_SCOPE)) || + (!lb4_svc_is_external(svc) && + (svc->flags2 & SVC_FLAG_INT_LOCAL_SCOPE))) { + return DROP_NO_SERVICE; + } + /* continue via the slowpath */ + return CTX_ACT_OK; +} + +static __always_inline +int handle_nonroutable_endpoints_v6(struct lb6_service *svc) +{ + if ((lb6_svc_is_external(svc) && + (svc->flags & SVC_FLAG_EXT_LOCAL_SCOPE)) || + (!lb6_svc_is_external(svc) && + (svc->flags2 & SVC_FLAG_INT_LOCAL_SCOPE))) { + return DROP_NO_SERVICE; + } + /* continue via the slowpath */ + return CTX_ACT_OK; +} diff --git a/bpf/lib/mcast.h b/bpf/lib/mcast.h index d169d3e1a15d4..324ab640d8a7e 100644 --- a/bpf/lib/mcast.h +++ b/bpf/lib/mcast.h @@ -340,7 +340,6 @@ static long __mcast_ep_delivery(__maybe_unused void *sub_map, struct _mcast_ep_delivery_ctx *cb_ctx) { int ret = 0; - __u32 tunnel_id = WORLD_ID; __u8 from_overlay = 0; struct bpf_tunnel_key tun_key = {0}; @@ -372,13 +371,7 @@ static long __mcast_ep_delivery(__maybe_unused void *sub_map, if (from_overlay) return 0; -#ifdef ENABLE_ENCRYPTED_OVERLAY - /* if encrypted overlay is enabled we'll mark the packet for - * encryption via the tunnel ID. 
- */ - tunnel_id = ENCRYPTED_OVERLAY_ID; -#endif /* ENABLE_ENCRYPTED_OVERLAY */ - tun_key.tunnel_id = tunnel_id; + tun_key.tunnel_id = WORLD_ID; tun_key.remote_ipv4 = bpf_ntohl(sub->saddr); tun_key.tunnel_ttl = IPDEFTTL; diff --git a/bpf/lib/nat.h b/bpf/lib/nat.h index 863f7403bb960..eb1bb3c1e5a53 100644 --- a/bpf/lib/nat.h +++ b/bpf/lib/nat.h @@ -473,6 +473,10 @@ snat_v4_rewrite_headers(struct __ctx_buff *ctx, __u8 nexthdr, int l3_off, if (ctx_store_bytes(ctx, l3_off + addr_off, &new_addr, 4, 0) < 0) return DROP_WRITE_ERROR; + /* Amend the L3 checksum due to changing the addresses. */ + if (ipv4_csum_update_by_diff(ctx, l3_off, sum) < 0) + return DROP_CSUM_L3; + if (has_l4_header) { int flags = BPF_F_PSEUDO_HDR; struct csum_offset csum = {}; @@ -516,10 +520,6 @@ snat_v4_rewrite_headers(struct __ctx_buff *ctx, __u8 nexthdr, int l3_off, return DROP_CSUM_L4; } - /* Amend the L3 checksum due to changing the addresses. */ - if (ipv4_csum_update_by_diff(ctx, l3_off, sum) < 0) - return DROP_CSUM_L3; - return 0; } diff --git a/bpf/lib/nodeport.h b/bpf/lib/nodeport.h index de297f63008ae..27805b3768720 100644 --- a/bpf/lib/nodeport.h +++ b/bpf/lib/nodeport.h @@ -27,6 +27,8 @@ #include "fib.h" #include "srv6.h" +DECLARE_CONFIG(bool, enable_no_service_endpoints_routable, + "Enable routes when service has 0 endpoints") DECLARE_CONFIG(__u16, device_mtu, "MTU of the device the bpf program is attached to (default: MTU set in node_config.h by agent)") ASSIGN_CONFIG(__u16, device_mtu, MTU) #define THIS_MTU CONFIG(device_mtu) /* Backwards compatibility */ @@ -113,7 +115,7 @@ nodeport_add_tunnel_encap(struct __ctx_buff *ctx, __u32 src_ip, __be16 src_port, * Otherwise, the kernel will drop such request in * https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/tree/net/core/filter.c?h=v6.7.4#n2147 */ - if (ETH_HLEN == 0) { + if (THIS_IS_L3_DEV) { int ret; ret = add_l2_hdr(ctx); @@ -146,7 +148,7 @@ nodeport_add_tunnel_encap_opt(struct __ctx_buff *ctx, __u32 src_ip, __be16 src_p * Otherwise, the kernel will drop such request in * https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/tree/net/core/filter.c?h=v6.7.4#n2147 */ - if (ETH_HLEN == 0) { + if (THIS_IS_L3_DEV) { int ret; ret = add_l2_hdr(ctx); @@ -1351,13 +1353,15 @@ static __always_inline int nodeport_svc_lb6(struct __ctx_buff *ctx, key, tuple, svc, &ct_state_svc, nodeport_xlate6(svc, tuple), ext_err, 0); if (IS_ERR(ret)) { -#ifdef SERVICE_NO_BACKEND_RESPONSE if (ret == DROP_NO_SERVICE) { + if (!CONFIG(enable_no_service_endpoints_routable)) + return handle_nonroutable_endpoints_v6(svc); +#ifdef SERVICE_NO_BACKEND_RESPONSE edt_set_aggregate(ctx, 0); ret = tail_call_internal(ctx, CILIUM_CALL_IPV6_NO_SERVICE, - ext_err); - } + ext_err); #endif + } if (ret == LB_PUNT_TO_STACK) { *punt_to_stack = true; return CTX_ACT_OK; @@ -2706,14 +2710,17 @@ static __always_inline int nodeport_svc_lb4(struct __ctx_buff *ctx, nodeport_xlate4(svc, tuple), &cluster_id, ext_err, 0); } if (IS_ERR(ret)) { -#ifdef SERVICE_NO_BACKEND_RESPONSE if (ret == DROP_NO_SERVICE) { + if (!CONFIG(enable_no_service_endpoints_routable)) + return handle_nonroutable_endpoints_v4(svc); + +#ifdef SERVICE_NO_BACKEND_RESPONSE /* Packet is TX'ed back out, avoid EDT false-positives: */ edt_set_aggregate(ctx, 0); ret = tail_call_internal(ctx, CILIUM_CALL_IPV4_NO_SERVICE, ext_err); - } #endif + } if (ret == LB_PUNT_TO_STACK) { *punt_to_stack = true; return CTX_ACT_OK; diff --git a/bpf/lib/overloadable_skb.h b/bpf/lib/overloadable_skb.h index 7842e1d2fdc86..acc8d11406f63 100644 --- 
a/bpf/lib/overloadable_skb.h +++ b/bpf/lib/overloadable_skb.h @@ -247,14 +247,6 @@ static __always_inline bool ctx_is_overlay(const struct __sk_buff *ctx) return (ctx->mark & MARK_MAGIC_HOST_MASK) == MARK_MAGIC_OVERLAY; } -static __always_inline bool ctx_is_overlay_encrypted(const struct __sk_buff *ctx) -{ - if (!is_defined(HAVE_ENCAP)) - return false; - - return (ctx->mark & MARK_MAGIC_KEY_MASK) == MARK_MAGIC_OVERLAY_ENCRYPTED; -} - static __always_inline bool ctx_mark_is_encrypted(const struct __sk_buff *ctx) { if (!is_defined(ENABLE_WIREGUARD) && !is_defined(ENABLE_IPSEC)) diff --git a/bpf/lib/trace.h b/bpf/lib/trace.h index 5909a124ada59..af7d97a497cac 100644 --- a/bpf/lib/trace.h +++ b/bpf/lib/trace.h @@ -29,6 +29,7 @@ #include "metrics.h" #include "ratelimit.h" #include "classifiers.h" +#include "trace_helpers.h" /* Reasons for forwarding a packet, keep in sync with pkg/monitor/datapath_trace.go */ enum trace_reason { @@ -165,9 +166,14 @@ struct trace_notify { }; union v6addr orig_ip6; }; + __u64 ip_trace_id; }; #ifdef TRACE_NOTIFY + +/* Trace notify version 2 includes IP Trace support. */ +#define NOTIFY_TRACE_VER 2 + static __always_inline bool emit_trace_notify(enum trace_point obs_point, __u32 monitor) { @@ -205,6 +211,7 @@ _send_trace_notify(struct __ctx_buff *ctx, enum trace_point obs_point, enum trace_reason reason, __u32 monitor, __be16 proto, __u16 line, __u8 file) { + __u64 ip_trace_id = load_ip_trace_id(); __u64 ctx_len = ctx_full_len(ctx); __u64 cap_len; struct ratelimit_key rkey = { @@ -233,13 +240,14 @@ _send_trace_notify(struct __ctx_buff *ctx, enum trace_point obs_point, msg = (typeof(msg)) { __notify_common_hdr(CILIUM_NOTIFY_TRACE, obs_point), - __notify_pktcap_hdr((__u32)ctx_len, (__u16)cap_len, NOTIFY_CAPTURE_VER), + __notify_pktcap_hdr((__u32)ctx_len, (__u16)cap_len, NOTIFY_TRACE_VER), .src_label = src, .dst_label = dst, .dst_id = dst_id, .reason = reason, .flags = flags, .ifindex = ifindex, + .ip_trace_id = ip_trace_id, }; memset(&msg.orig_ip6, 0, sizeof(union v6addr)); @@ -254,6 +262,7 @@ _send_trace_notify4(struct __ctx_buff *ctx, enum trace_point obs_point, __u32 ifindex, enum trace_reason reason, __u32 monitor, __u16 line, __u8 file) { + __u64 ip_trace_id = load_ip_trace_id(); __u64 ctx_len = ctx_full_len(ctx); __u64 cap_len; struct ratelimit_key rkey = { @@ -282,7 +291,7 @@ _send_trace_notify4(struct __ctx_buff *ctx, enum trace_point obs_point, msg = (typeof(msg)) { __notify_common_hdr(CILIUM_NOTIFY_TRACE, obs_point), - __notify_pktcap_hdr((__u32)ctx_len, (__u16)cap_len, NOTIFY_CAPTURE_VER), + __notify_pktcap_hdr((__u32)ctx_len, (__u16)cap_len, NOTIFY_TRACE_VER), .src_label = src, .dst_label = dst, .dst_id = dst_id, @@ -290,6 +299,7 @@ _send_trace_notify4(struct __ctx_buff *ctx, enum trace_point obs_point, .ifindex = ifindex, .flags = flags, .orig_ip4 = orig_addr, + .ip_trace_id = ip_trace_id, }; ctx_event_output(ctx, &cilium_events, @@ -303,6 +313,7 @@ _send_trace_notify6(struct __ctx_buff *ctx, enum trace_point obs_point, __u16 dst_id, __u32 ifindex, enum trace_reason reason, __u32 monitor, __u16 line, __u8 file) { + __u64 ip_trace_id = load_ip_trace_id(); __u64 ctx_len = ctx_full_len(ctx); __u64 cap_len; struct ratelimit_key rkey = { @@ -331,13 +342,14 @@ _send_trace_notify6(struct __ctx_buff *ctx, enum trace_point obs_point, msg = (typeof(msg)) { __notify_common_hdr(CILIUM_NOTIFY_TRACE, obs_point), - __notify_pktcap_hdr((__u32)ctx_len, (__u16)cap_len, NOTIFY_CAPTURE_VER), + __notify_pktcap_hdr((__u32)ctx_len, (__u16)cap_len, NOTIFY_TRACE_VER), .src_label 
= src, .dst_label = dst, .dst_id = dst_id, .reason = reason, .ifindex = ifindex, .flags = flags, + .ip_trace_id = ip_trace_id, }; ipv6_addr_copy(&msg.orig_ip6, orig_addr); diff --git a/bpf/lib/trace_helpers.h b/bpf/lib/trace_helpers.h new file mode 100644 index 0000000000000..ac8e99c162cf6 --- /dev/null +++ b/bpf/lib/trace_helpers.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright Authors of Cilium */ + +#pragma once + +#include +#include +#include "common.h" +#include "ip_options.h" + +/* Define the ip trace ID map with __u64 trace_id */ +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); /* only one key */ + __type(value, __u64); /* trace_id type */ + __uint(pinning, LIBBPF_PIN_BY_NAME); +} cilium_percpu_trace_id __section_maps_btf; + +/* bpf_trace_id_set sets the trace_id in the map. */ +static __always_inline void bpf_trace_id_set(__u64 trace_id) +{ + __u32 zero = 0; + __u64 *value = map_lookup_elem(&cilium_percpu_trace_id, &zero); + + if (value) + *value = trace_id; +} + +/* bpf_trace_id_get retrieves the trace_id from the map. */ +static __always_inline __u64 bpf_trace_id_get(void) +{ + __u32 zero = 0; + __u64 *value = map_lookup_elem(&cilium_percpu_trace_id, &zero); + + if (value) + return *value; + return 0; +} + +/* Function to parse and store the trace_id if the feature is enabled. */ +static __always_inline void +check_and_store_ip_trace_id(struct __ctx_buff *ctx) +{ + __s64 trace_id = 0; + int ret; + + if (CONFIG(tracing_ip_option_type) == 0) { + bpf_trace_id_set(0); + return; + } + + ret = trace_id_from_ctx(ctx, &trace_id, CONFIG(tracing_ip_option_type)); + if (IS_ERR(ret)) + bpf_trace_id_set(0); + else + bpf_trace_id_set(trace_id); +} + +static __always_inline __u64 load_ip_trace_id(void) +{ + return bpf_trace_id_get(); +} diff --git a/bpf/lib/vtep.h b/bpf/lib/vtep.h index 7111b5c0aa853..63d4445d784a3 100644 --- a/bpf/lib/vtep.h +++ b/bpf/lib/vtep.h @@ -11,6 +11,12 @@ struct vtep_key { __u32 vtep_ip; }; +struct vtep_policy_key { + __u32 prefixlen; + __u32 src_ip; + __u32 dst_ip; +}; + struct vtep_value { __u64 vtep_mac; __u32 tunnel_endpoint; @@ -25,4 +31,13 @@ struct { __uint(max_entries, VTEP_MAP_SIZE); __uint(map_flags, CONDITIONAL_PREALLOC); } cilium_vtep_map __section_maps_btf; + +struct { + __uint(type, BPF_MAP_TYPE_LPM_TRIE); + __type(key, struct vtep_policy_key); + __type(value, struct vtep_value); + __uint(pinning, LIBBPF_PIN_BY_NAME); + __uint(max_entries, VTEP_POLICY_MAP_SIZE); + __uint(map_flags, BPF_F_NO_PREALLOC); +} cilium_vtep_policy_map __section_maps_btf; #endif /* ENABLE_VTEP */ diff --git a/bpf/lib/wireguard.h b/bpf/lib/wireguard.h index 25db29f6c25d6..fc6b2684439dc 100644 --- a/bpf/lib/wireguard.h +++ b/bpf/lib/wireguard.h @@ -195,10 +195,8 @@ static __always_inline bool strict_allow(struct __ctx_buff *ctx, __be16 proto) { struct remote_endpoint_info __maybe_unused *dest_info, __maybe_unused *src_info; bool __maybe_unused in_strict_cidr = false; + struct iphdr __maybe_unused *ip4; void *data, *data_end; -#ifdef ENABLE_IPV4 - struct iphdr *ip4; -#endif switch (proto) { #ifdef ENABLE_IPV4 diff --git a/bpf/node_config.h b/bpf/node_config.h index 4f9bb7a08b300..44bef60267b69 100644 --- a/bpf/node_config.h +++ b/bpf/node_config.h @@ -45,12 +45,6 @@ #define LOCAL_NODE_ID 6 #define REMOTE_NODE_ID 6 #define KUBE_APISERVER_NODE_ID 7 -/* This identity should never be seen on ingress or egress traffic to/from a - * node. 
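Taken together, the intended flow for the per-CPU trace-ID helpers defined in trace_helpers.h above is to parse and cache the ID once at program entry and to read it back whenever a trace or drop notification is emitted on the same CPU. A minimal sketch, with a hypothetical entry point standing in for the ones wired up in this diff:

__section_entry
int example_entry(struct __ctx_buff *ctx)	/* hypothetical program */
{
	/* Parse the IPv4 trace-ID option (if configured) and cache it. */
	check_and_store_ip_trace_id(ctx);

	/* ... regular datapath processing ... */

	/* Notification paths then pick it up, as trace.h and drop.h do:
	 *   __u64 ip_trace_id = load_ip_trace_id();
	 * and copy it into the emitted event.
	 */
	return CTX_ACT_OK;
}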
- * It signals that the skb is overlay traffic that must be IPSec encrypted - * before it leaves the host. - */ -#define ENCRYPTED_OVERLAY_ID 11 #define CILIUM_HOST_MAC { .addr = { 0xce, 0x72, 0xa7, 0x03, 0x88, 0x56 } } #define NODEPORT_PORT_MIN 30000 #define NODEPORT_PORT_MAX 32767 @@ -123,6 +117,7 @@ #define THROTTLE_MAP_SIZE 65536 #define ENABLE_ARP_RESPONDER #define VTEP_MAP_SIZE 8 +#define VTEP_POLICY_MAP_SIZE 16384 #define ENDPOINTS_MAP_SIZE 65536 #define METRICS_MAP_SIZE 65536 #define CILIUM_NET_MAC { .addr = { 0xce, 0x72, 0xa7, 0x03, 0x88, 0x57 } } diff --git a/bpf/tests/bpftest/bpf_test.go b/bpf/tests/bpftest/bpf_test.go index 57fbfe31ed2d4..91e9299fca4f4 100644 --- a/bpf/tests/bpftest/bpf_test.go +++ b/bpf/tests/bpftest/bpf_test.go @@ -300,8 +300,7 @@ func loadAndRunSpec(t *testing.T, entry fs.DirEntry, instrLog io.Writer) []*cove } func loadAndPrepSpec(t *testing.T, elfPath string) *ebpf.CollectionSpec { - logger := hivetest.Logger(t) - spec, err := bpf.LoadCollectionSpec(logger, elfPath) + spec, err := ebpf.LoadCollectionSpec(elfPath) if err != nil { t.Fatalf("load spec %s: %v", elfPath, err) } diff --git a/bpf/tests/classifiers_common.h b/bpf/tests/classifiers_common.h index 05b4c23e0d536..b634d547283b7 100644 --- a/bpf/tests/classifiers_common.h +++ b/bpf/tests/classifiers_common.h @@ -44,7 +44,7 @@ ASSIGN_CONFIG(__u32, trace_payload_len_overlay, 20UL); static __always_inline void adjust_l2(struct __ctx_buff *ctx) { - if (ETH_HLEN != 0) + if (!THIS_IS_L3_DEV) return; void *data = (void *)(long)ctx->data; diff --git a/bpf/tests/ip_options_trace_id.c b/bpf/tests/ip_options_trace_id.c new file mode 100644 index 0000000000000..59eefcfa7f590 --- /dev/null +++ b/bpf/tests/ip_options_trace_id.c @@ -0,0 +1,975 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright Authors of Cilium */ + +#include +#include "common.h" +#include "pktgen.h" +#include "node_config.h" + +#define DEBUG +#include + +/* Used to define IP options for packet generation. */ +struct ipopthdr { + /* type field of the IP option. */ + __u8 type; + /* len field of the IP option. Usually equal to total length of the IP + * option, including type and len. Can be specified different from data + * length for testing purposes. If zero, it will not be written to the + * packet, so that tests can specify single-byte options. + */ + __u8 len; + /* Arbitrary data for the payload of the IP option. */ + __u8 *data; + /* Length of the data field in bytes. Must match exactly. */ + __u8 data_len; +}; + +/* Injects a packet into the ctx with the IPv4 options specified. See comments + * on the struct for more details on how to specify options. The total byte + * content of the options must align on 4-byte boundaries so that the IHL can be + * accurate. + * opts_len: the number of options in opts. + * opts_bytes: the total number of bytes in options. 
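For instance, mirroring the "solo" test further below (which assumes option type 136 carries the trace ID), a single 4-byte option holding trace ID 1 can be described and emitted like this:

	struct ipopthdr opts[] = {
		{
			.type = 136,			/* trace-ID option type used by the tests */
			.len = 4,			/* type + len + 2 data bytes */
			.data = (__u8 *)"\x00\x01",	/* trace ID 1, network byte order */
			.data_len = 2,
		},
	};
	/* Lays out the option bytes 0x88 0x04 0x00 0x01 in the IPv4 header. */
	return gen_packet_with_options(ctx, opts, 1, 4);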
+ */ +static __always_inline __maybe_unused int +gen_packet_with_options(struct __sk_buff *ctx, + const struct ipopthdr *opts, + __u8 opts_len, __u8 opt_bytes) +{ + struct pktgen builder; + struct iphdr *l3; + __u8 *new_opt; + int i, j, new_opt_len; + + if (opt_bytes % 4 != 0) + return TEST_ERROR; + pktgen__init(&builder, ctx); + if (!pktgen__push_ethhdr(&builder)) + return TEST_ERROR; + l3 = pktgen__push_default_iphdr_with_options(&builder, opt_bytes / 4); + if (!l3) + return TEST_ERROR; + + new_opt = (__u8 *)&l3[1]; + for (i = 0; i < opts_len; i++) { + new_opt_len = 0; + new_opt[0] = opts[i].type; + new_opt_len++; + if (opts[i].len != 0) { + new_opt[new_opt_len] = opts[i].len; + new_opt_len++; + } + for (j = 0; j < opts[i].data_len; j++) { + new_opt[new_opt_len] = opts[i].data[j]; + new_opt_len++; + } + new_opt += new_opt_len; + } + if (!pktgen__push_data(&builder, default_data, sizeof(default_data))) + return TEST_ERROR; + pktgen__finish(&builder); + return TEST_PASS; +} + +/* Following section has tests for trace ID feature for packet + * validation and preprocessing. + */ + +/* Test packet with no l3 header should return TRACE_ID_ERROR. */ +PKTGEN("tc", "extract_trace_id_with_no_l3_header_error") +int test_extract_trace_id_with_no_l3_header_error_pktgen(struct __ctx_buff *ctx) +{ + struct pktgen builder; + + pktgen__init(&builder, ctx); + if (!pktgen__push_ethhdr(&builder)) + return TEST_ERROR; + if (!pktgen__push_data(&builder, default_data, sizeof(default_data))) + return TEST_ERROR; + pktgen__finish(&builder); + return TEST_PASS; +} + +CHECK("tc", "extract_trace_id_with_no_l3_header_error") +int test_extract_trace_id_with_no_l3_header_error_check(struct __ctx_buff *ctx) +{ + test_init(); + __s64 want = TRACE_ID_ERROR; + __s64 trace_id = 0; + int ret; + + ret = trace_id_from_ctx(ctx, &trace_id, 136); + if (IS_ERR(ret)) + trace_id = ret; + + if (trace_id != want) + test_fatal("trace_id_from_ctx(ctx) = %d; want %d\n", trace_id, want); + test_finish(); +} + +/* Test packet with no eth header should return TRACE_ID_NO_FAMILY. */ +PKTGEN("tc", "extract_trace_id_with_no_eth_header_no_family") +int test_extract_trace_id_with_no_eth_header_no_family_pktgen(struct __ctx_buff *ctx) +{ + struct pktgen builder; + + pktgen__init(&builder, ctx); + if (!pktgen__push_data(&builder, default_data, sizeof(default_data))) + return TEST_ERROR; + pktgen__finish(&builder); + return TEST_PASS; +} + +CHECK("tc", "extract_trace_id_with_no_eth_header_no_family") +int test_extract_trace_id_with_no_eth_header_no_family_check(struct __ctx_buff *ctx) +{ + test_init(); + __s64 want = TRACE_ID_NO_FAMILY; + __s64 trace_id = 0; + int ret; + + ret = trace_id_from_ctx(ctx, &trace_id, 136); + if (IS_ERR(ret)) + trace_id = ret; + + if (trace_id != want) + test_fatal("trace_id_from_ctx(ctx) = %d; want %d\n", trace_id, want); + test_finish(); +} + +/* Test packet with IPv6 header should return TRACE_ID_SKIP_IPV6. 
*/ +PKTGEN("tc", "extract_trace_id_no_ipv6_options") +int test_extract_trace_id_no_ipv6_options_pktgen(struct __ctx_buff *ctx) +{ + struct pktgen builder; + + pktgen__init(&builder, ctx); + if (!pktgen__push_ethhdr(&builder)) + return TEST_ERROR; + if (!pktgen__push_default_ipv6hdr(&builder)) + return TEST_ERROR; + if (!pktgen__push_data(&builder, default_data, sizeof(default_data))) + return TEST_ERROR; + + pktgen__finish(&builder); + return TEST_PASS; +} + +CHECK("tc", "extract_trace_id_no_ipv6_options") +int test_extract_trace_id_no_ipv6_options_check(struct __ctx_buff *ctx) +{ + test_init(); + __s64 want = TRACE_ID_SKIP_IPV6; + __s64 trace_id = 0; + int ret; + + ret = trace_id_from_ctx(ctx, &trace_id, 136); + if (IS_ERR(ret)) + trace_id = ret; + + if (trace_id != want) + test_fatal("trace_id_from_ctx(ctx) = %lld; want %lld\n", trace_id, want); + test_finish(); +} + +/* Test a single option specifying the trace ID with no special cases. */ +PKTGEN("tc", "extract_trace_id_solo") +int test_extract_trace_id_solo_pktgen(struct __ctx_buff *ctx) +{ + struct ipopthdr opts[] = { + { + .type = 136, + .len = 4, + .data = (__u8 *)"\x00\x01", + .data_len = 2, + }, + }; + return gen_packet_with_options(ctx, opts, 1, 4); +} + +CHECK("tc", "extract_trace_id_solo") +int test_extract_trace_id_solo_check(struct __ctx_buff *ctx) +{ + test_init(); + __s64 want = 1; + __s64 trace_id = 0; + int ret; + + ret = trace_id_from_ctx(ctx, &trace_id, 136); + if (IS_ERR(ret)) + trace_id = ret; + + if (trace_id != want) + test_fatal("trace_id_from_ctx(ctx) = %d; want %d\n", trace_id, want); + test_finish(); +} + +/* Test packet with IPv4 header should return TRACE_ID_NOT_FOUND. */ +PKTGEN("tc", "extract_trace_id_no_ipv4_options") +int test_extract_trace_id_no_options_pktgen(struct __ctx_buff *ctx) +{ + struct pktgen builder; + + pktgen__init(&builder, ctx); + if (!pktgen__push_ethhdr(&builder)) + return TEST_ERROR; + if (!pktgen__push_iphdr(&builder, 0)) + return TEST_ERROR; + if (!pktgen__push_data(&builder, default_data, sizeof(default_data))) + return TEST_ERROR; + + pktgen__finish(&builder); + return TEST_PASS; +} + +CHECK("tc", "extract_trace_id_no_ipv4_options") +int test_extract_trace_id_no_options_check(struct __ctx_buff *ctx) +{ + test_init(); + __s64 want = TRACE_ID_NOT_FOUND; + __s64 trace_id = 0; + int ret; + + ret = trace_id_from_ctx(ctx, &trace_id, 136); + if (IS_ERR(ret)) + trace_id = ret; + + if (trace_id != want) + test_fatal("trace_id_from_ctx(ctx) = %d; want %d\n", trace_id, want); + test_finish(); +} + +/* Test trace ID after END should return TRACE_ID_NOT_FOUND. */ +PKTGEN("tc", "extract_trace_id_after_ipopt_end_not_found") +int test_extract_trace_id_after_ipopt_end_not_found_pktgen(struct __ctx_buff *ctx) +{ + struct ipopthdr opts[] = { + { + .type = IPOPT_END, + .len = 0, + .data_len = 0, + }, + { + .type = 136, + .len = 4, + .data = (__u8 *)"\x00\x01", + .data_len = 2, + }, + /* Add padding to align on 4-byte boundary. 
*/ + { + .type = IPOPT_NOOP, + .len = 0, + .data_len = 0, + }, + { + .type = IPOPT_NOOP, + .len = 0, + .data_len = 0, + }, + { + .type = IPOPT_NOOP, + .len = 0, + .data_len = 0, + }, + }; + return gen_packet_with_options(ctx, opts, 5, 8); +} + +CHECK("tc", "extract_trace_id_after_ipopt_end_not_found") +int test_extract_trace_id_after_ipopt_end_not_found_check(struct __ctx_buff *ctx) +{ + test_init(); + __s64 want = TRACE_ID_NOT_FOUND; + __s64 trace_id = 0; + int ret; + + ret = trace_id_from_ctx(ctx, &trace_id, 136); + if (IS_ERR(ret)) + trace_id = ret; + + if (trace_id != want) + test_fatal("trace_id_from_ctx(ctx) = %d; want %d\n", trace_id, want); + test_finish(); +} + +/* Test trace ID comes after loop limit should return TRACE_ID_NOT_FOUND. */ +PKTGEN("tc", "extract_trace_id_after_loop_limit_not_found") +int test_extract_trace_id_after_loop_limit_not_found_pktgen(struct __ctx_buff *ctx) +{ + struct ipopthdr opts[] = { + { + .type = IPOPT_NOOP, + .len = 0, + .data_len = 0, + }, + { + .type = IPOPT_NOOP, + .len = 0, + .data_len = 0, + }, + { + .type = IPOPT_NOOP, + .len = 0, + .data_len = 0, + }, + /* The loop limit is 3 so the following options are ignored. */ + { + .type = 136, + .len = 4, + .data = (__u8 *)"\x00\x01", + .data_len = 2, + }, + { + .type = IPOPT_NOOP, + .len = 0, + .data_len = 0, + }, + }; + return gen_packet_with_options(ctx, opts, 5, 8); +} + +CHECK("tc", "extract_trace_id_after_loop_limit_not_found") +int test_extract_trace_id_after_loop_limit_not_found_check(struct __ctx_buff *ctx) +{ + test_init(); + __s64 want = TRACE_ID_NOT_FOUND; + __s64 trace_id = 0; + int ret; + + ret = trace_id_from_ctx(ctx, &trace_id, 136); + if (IS_ERR(ret)) + trace_id = ret; + + if (trace_id != want) + test_fatal("trace_id_from_ctx(ctx) = %d; want %d\n", trace_id, want); + test_finish(); +} + +/* Test three options with the trace ID option being first. */ +PKTGEN("tc", "extract_trace_id_first_of_three") +int test_extract_trace_id_first_of_three_pktgen(struct __ctx_buff *ctx) +{ + struct ipopthdr opts[] = { + { + .type = 136, + .len = 4, + .data = (__u8 *)"\x00\x01", + .data_len = 2, + }, + { + .type = 10, + .len = 4, + .data = (__u8 *)"\x10\x10", + .data_len = 2, + }, + { + .type = 11, + .len = 4, + .data = (__u8 *)"\x11\x11", + .data_len = 2, + }, + }; + return gen_packet_with_options(ctx, opts, 3, 12); +} + +CHECK("tc", "extract_trace_id_first_of_three") +int test_extract_trace_id_first_of_three_check(struct __ctx_buff *ctx) +{ + test_init(); + __s64 want = 1; + __s64 trace_id = 0; + int ret; + + ret = trace_id_from_ctx(ctx, &trace_id, 136); + if (IS_ERR(ret)) + trace_id = ret; + + if (trace_id != want) + test_fatal("trace_id_from_ctx(ctx) = %d; want %d\n", trace_id, want); + test_finish(); +} + +/* Test three options with the trace ID option being between the other two. 
*/ +PKTGEN("tc", "extract_trace_id_middle_of_three") +int test_extract_trace_id_middle_of_three_pktgen(struct __ctx_buff *ctx) +{ + struct ipopthdr opts[] = { + { + .type = 10, + .len = 4, + .data = (__u8 *)"\x10\x10", + .data_len = 2, + }, + { + .type = 136, + .len = 4, + .data = (__u8 *)"\x00\x01", + .data_len = 2, + }, + { + .type = 11, + .len = 4, + .data = (__u8 *)"\x11\x11", + .data_len = 2, + }, + }; + return gen_packet_with_options(ctx, opts, 3, 12); +} + +CHECK("tc", "extract_trace_id_middle_of_three") +int test_extract_trace_id_middle_of_three_check(struct __ctx_buff *ctx) +{ + test_init(); + __s64 want = 1; + __s64 trace_id = 0; + int ret; + + ret = trace_id_from_ctx(ctx, &trace_id, 136); + if (IS_ERR(ret)) + trace_id = ret; + + if (trace_id != want) + test_fatal("trace_id_from_ctx(ctx) = %d; want %d\n", trace_id, want); + test_finish(); +} + +/* Test three options with the trace ID option being last of the three. */ +PKTGEN("tc", "extract_trace_id_last_of_three") +int test_extract_trace_id_last_of_three_pktgen(struct __ctx_buff *ctx) +{ + struct ipopthdr opts[] = { + { + .type = 10, + .len = 4, + .data = (__u8 *)"\x10\x10", + .data_len = 2, + }, + { + .type = 11, + .len = 4, + .data = (__u8 *)"\x11\x11", + .data_len = 2, + }, + { + .type = 136, + .len = 4, + .data = (__u8 *)"\x00\x01", + .data_len = 2, + }, + }; + + return gen_packet_with_options(ctx, opts, 3, 12); +} + +CHECK("tc", "extract_trace_id_last_of_three") +int test_extract_trace_id_last_of_three_check(struct __ctx_buff *ctx) +{ + test_init(); + __s64 want = 1; + __s64 trace_id = 0; + int ret; + + ret = trace_id_from_ctx(ctx, &trace_id, 136); + if (IS_ERR(ret)) + trace_id = ret; + + if (trace_id != want) + test_fatal("trace_id_from_ctx(ctx) = %d; want %d\n", trace_id, want); + test_finish(); +} + +/* Test multiple options with the trace ID coming after a NOOP option. */ +PKTGEN("tc", "extract_trace_id_after_ipopt_noop") +int test_extract_trace_id_after_ipopt_noop_pktgen(struct __ctx_buff *ctx) +{ + struct ipopthdr opts[] = { + { + .type = IPOPT_NOOP, + .len = 0, /* Single byte option. */ + .data_len = 0, + }, + { + .type = IPOPT_NOOP, + .len = 0, /* Single byte option. */ + .data_len = 0, + }, + { + .type = 136, + .len = 4, + .data = (__u8 *)"\x00\x01", + .data_len = 2, + }, + { + .type = IPOPT_NOOP, + .len = 0, /* Single byte option. */ + .data_len = 0, + }, + { + .type = IPOPT_NOOP, + .len = 0, /* Single byte option. */ + .data_len = 0, + }, + }; + return gen_packet_with_options(ctx, opts, 5, 8); +} + +CHECK("tc", "extract_trace_id_after_ipopt_noop") +int test_extract_trace_id_after_ipopt_noop_check(struct __ctx_buff *ctx) +{ + test_init(); + __s64 want = 1; + __s64 trace_id = 0; + int ret; + + ret = trace_id_from_ctx(ctx, &trace_id, 136); + if (IS_ERR(ret)) + trace_id = ret; + + if (trace_id != want) + test_fatal("trace_id_from_ctx(ctx) = %d; want %d\n", trace_id, want); + test_finish(); +} + +/* Test multiple options with the trace ID not present should return TRACE_ID_NOT_FOUND. 
*/ +PKTGEN("tc", "extract_trace_id_not_found_with_other_options") +int test_extract_trace_id__not_found_with_other_options_pktgen(struct __ctx_buff *ctx) +{ + struct ipopthdr opts[] = { + { + .type = 10, + .len = 4, + .data = (__u8 *)"\x10\x10", + .data_len = 2, + }, + { + .type = 11, + .len = 4, + .data = (__u8 *)"\x11\x11", + .data_len = 2, + }, + }; + + return gen_packet_with_options(ctx, opts, 2, 8); +} + +CHECK("tc", "extract_trace_id_not_found_with_other_options") +int test_extract_trace_id_not_found_with_other_options_check(struct __ctx_buff *ctx) +{ + test_init(); + __s64 want = TRACE_ID_NOT_FOUND; + __s64 trace_id = 0; + int ret; + + ret = trace_id_from_ctx(ctx, &trace_id, 136); + if (IS_ERR(ret)) + trace_id = ret; + + if (trace_id != want) + test_fatal("trace_id_from_ctx(ctx) = %d; want %d\n", trace_id, want); + test_finish(); +} + +/* Test trace ID with incorrect length field should return INVALID. */ +PKTGEN("tc", "extract_trace_id_wrong_len_invalid") +int test_extract_trace_id_wrong_len_invalid_pktgen(struct __ctx_buff *ctx) +{ + struct ipopthdr opts[] = { + { + .type = 136, + .len = 3, /* Invalid length with this option. */ + .data = (__u8 *)"\x00\x01", + .data_len = 2, + }, + }; + + return gen_packet_with_options(ctx, opts, 1, 4); +} + +CHECK("tc", "extract_trace_id_wrong_len_invalid") +int test_extract_trace_id_wrong_len_invalid_check(struct __ctx_buff *ctx) +{ + test_init(); + __s64 want = TRACE_ID_INVALID; + __s64 trace_id = 0; + int ret; + + ret = trace_id_from_ctx(ctx, &trace_id, 136); + if (IS_ERR(ret)) + trace_id = ret; + + if (trace_id != want) + test_fatal("trace_id_from_ctx(ctx) = %d; want %d\n", trace_id, want); + test_finish(); +} + +/* Test trace ID with negative value should return TRACE_ID_INVALID. */ +PKTGEN("tc", "extract_trace_id_negative") +int test_extract_trace_id_negative_invalid_pktgen(struct __ctx_buff *ctx) +{ + struct ipopthdr opts[] = { + { + .type = 136, + .len = 4, + .data = (__u8 *)"\x80\x01", + .data_len = 2, + }, + }; + + return gen_packet_with_options(ctx, opts, 1, 4); +} + +CHECK("tc", "extract_trace_id_negative") +int test_extract_trace_id_negative_invalid_check(struct __ctx_buff *ctx) +{ + test_init(); + __s64 want = 0x8001; + __s64 trace_id = 0; + int ret; + + ret = trace_id_from_ctx(ctx, &trace_id, 136); + if (IS_ERR(ret)) + trace_id = ret; + + if (trace_id != want) + test_fatal("trace_id_from_ctx(ctx) = %d; want %d\n", trace_id, want); + + test_finish(); +} + +/* Store and read trace ID to different option than stream ID with 2 bytes of data. */ +PKTGEN("tc", "extract_trace_id_different_option_type") +int test_extract_trace_id_different_option_type_pktgen(struct __ctx_buff *ctx) +{ + struct ipopthdr opts[] = { + { + .type = 137, + .len = 4, + .data = (__u8 *)"\x00\x02", + .data_len = 2, + }, + }; + + return gen_packet_with_options(ctx, opts, 1, 4); +} + +CHECK("tc", "extract_trace_id_different_option_type") +int test_extract_trace_id_different_option_type_check(struct __ctx_buff *ctx) +{ + test_init(); + __s64 want = 0x0002; + __s64 trace_id = 0; + int ret; + + ret = trace_id_from_ctx(ctx, &trace_id, 137); + if (IS_ERR(ret)) + trace_id = ret; + + if (trace_id != want) + test_fatal("trace_id_from_ctx(ctx) = %d; want %d\n", trace_id, want); + test_finish(); +} + +/* Read trace ID from wrong IP option. 
*/ +PKTGEN("tc", "extract_read_trace_id_wrong_option_type") +int test_extract_read_trace_id_wrong_option_type_pktgen(struct __ctx_buff *ctx) +{ + struct ipopthdr opts[] = { + { + .type = 137, + .len = 4, + .data = (__u8 *)"\x00\x02", + .data_len = 2, + }, + }; + + return gen_packet_with_options(ctx, opts, 1, 4); +} + +CHECK("tc", "extract_read_trace_id_wrong_option_type") +int test_extract_read_trace_id_wrong_option_type_check(struct __ctx_buff *ctx) +{ + test_init(); + __s64 want = TRACE_ID_NOT_FOUND; + __s64 trace_id = 0; + int ret; + + ret = trace_id_from_ctx(ctx, &trace_id, 136); + if (IS_ERR(ret)) + trace_id = ret; + + if (trace_id != want) + test_fatal("trace_id_from_ctx(ctx) = %d; want %d\n", trace_id, want); + test_finish(); +} + +/* Test a valid 4-byte trace ID. */ +PKTGEN("tc", "extract_trace_id_4_bytes_valid") +int test_extract_trace_id_4_bytes_valid_pktgen(struct __ctx_buff *ctx) +{ + struct ipopthdr opts[] = { + { + .type = 136, + .len = 6, + .data = (__u8 *)"\x00\x01\x23\x45", + .data_len = 4, + }, + }; + + return gen_packet_with_options(ctx, opts, 1, 8); +} + +CHECK("tc", "extract_trace_id_4_bytes_valid") +int test_extract_trace_id_4_bytes_valid_check(struct __ctx_buff *ctx) +{ + test_init(); + __s64 want = 0x00012345; + __s64 trace_id = 0; + int ret; + + ret = trace_id_from_ctx(ctx, &trace_id, 136); + if (IS_ERR(ret)) + trace_id = ret; + + if (trace_id != want) + test_fatal("trace_id_from_ctx(ctx) = %lld; want %lld\n", trace_id, want); + test_finish(); +} + +/* Test negative trace id should return valid. */ +PKTGEN("tc", "extract_trace_id_negative_4_bytes") +int test_extract_trace_id_negative_4_bytes_pktgen(struct __ctx_buff *ctx) +{ + struct ipopthdr opts[] = { + { + .type = 136, + .len = 6, + .data = (__u8 *)"\x80\x01\x23\x45", + .data_len = 4, + }, + }; + + return gen_packet_with_options(ctx, opts, 1, 8); +} + +CHECK("tc", "extract_trace_id_negative_4_bytes") +int test_extract_trace_id_negative_4_bytes_check(struct __ctx_buff *ctx) +{ + test_init(); + __s64 want = 0x80012345; + __s64 trace_id = 0; + int ret; + + ret = trace_id_from_ctx(ctx, &trace_id, 136); + if (IS_ERR(ret)) + trace_id = ret; + + if (trace_id != want) + test_fatal("trace_id_from_ctx(ctx) = %lld; want %lld\n", trace_id, want); + test_finish(); +} + +/* Test a 4-byte trace ID with incorrect length. */ +PKTGEN("tc", "extract_trace_id_4_bytes_wrong_length") +int test_extract_trace_id_4_bytes_wrong_length_pktgen(struct __ctx_buff *ctx) +{ + struct ipopthdr opts[] = { + { + .type = 136, + .len = 5, /* Incorrect length */ + .data = (__u8 *)"\x01\x23\x45\x67", + .data_len = 4, + }, + }; + return gen_packet_with_options(ctx, opts, 1, 8); +} + +CHECK("tc", "extract_trace_id_4_bytes_wrong_length") +int test_extract_trace_id_4_bytes_wrong_length_check(struct __ctx_buff *ctx) +{ + test_init(); + __s64 want = TRACE_ID_INVALID; + __s64 trace_id = 0; + int ret; + + ret = trace_id_from_ctx(ctx, &trace_id, 136); + if (IS_ERR(ret)) + trace_id = ret; + + if (trace_id != want) + test_fatal("trace_id_from_ctx(ctx) = %lld; want %lld\n", trace_id, want); + test_finish(); +} + +/* Test a 4-byte trace ID before the end of option list. 
*/ +PKTGEN("tc", "extract_trace_id_4_bytes_before_end") +int test_extract_trace_id_4_bytes_before_end_pktgen(struct __ctx_buff *ctx) +{ + struct ipopthdr opts[] = { + { + .type = 136, + .len = 6, + .data = (__u8 *)"\x00\x01\x23\x45", + .data_len = 4, + }, + { + .type = IPOPT_END, + .len = 0, + .data_len = 0, + }, + }; + return gen_packet_with_options(ctx, opts, 2, 8); +} + +CHECK("tc", "extract_trace_id_4_bytes_before_end") +int test_extract_trace_id_4_bytes_before_end_check(struct __ctx_buff *ctx) +{ + test_init(); + __s64 want = 0x12345; + __s64 trace_id = 0; + int ret; + + ret = trace_id_from_ctx(ctx, &trace_id, 136); + if (IS_ERR(ret)) + trace_id = ret; + + if (trace_id != want) + test_fatal("trace_id_from_ctx(ctx) = %lld; want %lld\n", trace_id, want); + test_finish(); +} + +/* Test a valid 8-byte trace ID should return TRACE_ID_ERROR. */ +PKTGEN("tc", "extract_trace_id_8_bytes_valid") +int test_extract_trace_id_8_bytes_valid_pktgen(struct __ctx_buff *ctx) +{ + struct ipopthdr opts[] = { + { + .type = 136, + .len = 10, + .data = (__u8 *)"\x12\x34\x56\x78\x9A\xBC\xDE\xF0", + .data_len = 8, + }, + }; + return gen_packet_with_options(ctx, opts, 1, 12); +} + +CHECK("tc", "extract_trace_id_8_bytes_valid") +int test_extract_trace_id_8_bytes_valid_check(struct __ctx_buff *ctx) +{ + test_init(); + __s64 want = 0x123456789abcdef0; + __s64 trace_id = 0; + int ret; + + ret = trace_id_from_ctx(ctx, &trace_id, 136); + if (IS_ERR(ret)) + trace_id = ret; + + if (trace_id != want) + test_fatal("trace_id_from_ctx(ctx) = %lld; want %lld\n", trace_id, want); + test_finish(); +} + +/* Test an 8-byte trace ID followed by padding. */ +PKTGEN("tc", "extract_trace_id_8_bytes_with_padding") +int test_extract_trace_id_8_bytes_with_padding_pktgen(struct __ctx_buff *ctx) +{ + struct ipopthdr opts[] = { + { + .type = 136, + .len = 10, /* Total length including type and len fields */ + .data = (__u8 *)"\x01\x02\x03\x04\x00\x00\x00\x00", + .data_len = 8, + }, + { + .type = IPOPT_NOOP, + .len = 0, + .data_len = 0, + }, + }; + + return gen_packet_with_options(ctx, opts, 2, 12); +} + +CHECK("tc", "extract_trace_id_8_bytes_with_padding") +int test_extract_trace_id_8_bytes_with_padding_check(struct __ctx_buff *ctx) +{ + test_init(); + __s64 want = 0x0102030400000000; /* Expected valid trace ID */ + __s64 trace_id = 0; + int ret; + + ret = trace_id_from_ctx(ctx, &trace_id, 136); + if (IS_ERR(ret)) + trace_id = ret; + + if (trace_id != want) + test_fatal("trace_id_from_ctx(ctx) = %lld; want %lld\n", trace_id, want); + test_finish(); +} + +/* Test an 8-byte trace ID that represents a negative value. */ +PKTGEN("tc", "extract_trace_id_8_bytes_negative") +int test_extract_trace_id_8_bytes_negative_pktgen(struct __ctx_buff *ctx) +{ + struct ipopthdr opts[] = { + { + .type = 136, + .len = 10, + .data = (__u8 *)"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFA", + .data_len = 8, + }, + }; + + return gen_packet_with_options(ctx, opts, 1, 12); +} + +CHECK("tc", "extract_trace_id_8_bytes_negative") +int test_extract_trace_id_8_bytes_negative_check(struct __ctx_buff *ctx) +{ + test_init(); + __s64 want = 0xFFFFFFFFFFFFFFFA; + __s64 trace_id = 0; + int ret; + + ret = trace_id_from_ctx(ctx, &trace_id, 136); + if (IS_ERR(ret)) + trace_id = ret; + + if (trace_id != want) + test_fatal("trace_id_from_ctx(ctx) = %lld; want %lld\n", trace_id, want); + + test_finish(); +} + +/* Test an 8-byte trace ID with an invalid option length. 
*/ +PKTGEN("tc", "extract_trace_id_8_bytes_invalid_length") +int test_extract_trace_id_8_bytes_invalid_length_pktgen(struct __ctx_buff *ctx) +{ + struct ipopthdr opts[] = { + { + .type = 136, + .len = 9, /* Invalid length, should be 10 */ + .data = (__u8 *)"\x01\x02\x03\x04\x05\x06\x07\x08", + .data_len = 8, + }, + }; + return gen_packet_with_options(ctx, opts, 1, 12); +} + +CHECK("tc", "extract_trace_id_8_bytes_invalid_length") +int test_extract_trace_id_8_bytes_invalid_length_check(struct __ctx_buff *ctx) +{ + test_init(); + __s64 want = TRACE_ID_INVALID; /* Expected invalid trace ID */ + __s64 trace_id = 0; + int ret; + + ret = trace_id_from_ctx(ctx, &trace_id, 136); + if (IS_ERR(ret)) + trace_id = ret; + + if (trace_id != want) + test_fatal("trace_id_from_ctx(ctx) = %lld; want %lld\n", trace_id, want); + test_finish(); +} diff --git a/bpf/tests/ipsec_from_host_generic.h b/bpf/tests/ipsec_from_host_generic.h deleted file mode 100644 index c83b335a0111c..0000000000000 --- a/bpf/tests/ipsec_from_host_generic.h +++ /dev/null @@ -1,288 +0,0 @@ -/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ -/* Copyright Authors of Cilium */ - -#include -#include "common.h" -#include "pktgen.h" - -#define NODE_ID 2333 -#define TUNNEL_ID 0x1234 -#define ENCRYPT_KEY 3 -#define ENABLE_IPV4 -#define ENABLE_IPV6 -#define ENABLE_IPSEC - -#define ENCAP_IFINDEX 4 -#define skb_set_tunnel_key mock_skb_set_tunnel_key -#define ctx_redirect mock_ctx_redirect - -int mock_skb_set_tunnel_key(__maybe_unused struct __sk_buff *skb, - const struct bpf_tunnel_key *from, - __maybe_unused __u32 size, - __maybe_unused __u32 flags) -{ - if (from->tunnel_id != TUNNEL_ID) - return -1; - if (from->local_ipv4 != 0) - return -2; - if (from->remote_ipv4 != bpf_htonl(v4_node_two)) - return -3; - return 0; -} - -int mock_ctx_redirect(const struct __sk_buff *ctx __maybe_unused, int ifindex, __u32 flags) -{ - if (ifindex != ENCAP_IFINDEX) - return -1; - if (flags != 0) - return -2; - return CTX_ACT_REDIRECT; -} - -#include "bpf_host.c" - -ASSIGN_CONFIG(__u32, security_label, TUNNEL_ID) - -#include "lib/ipcache.h" - -#define FROM_HOST 0 -#define ESP_SEQUENCE 69865 - -struct { - __uint(type, BPF_MAP_TYPE_PROG_ARRAY); - __uint(key_size, sizeof(__u32)); - __uint(max_entries, 1); - __array(values, int()); -} entry_call_map __section(".maps") = { - .values = { - [FROM_HOST] = &cil_from_host, - }, -}; - -PKTGEN("tc", "ipv4_ipsec_from_host") -int ipv4_ipsec_from_host_pktgen(struct __ctx_buff *ctx) -{ - struct pktgen builder; - struct iphdr *l3; - struct ip_esp_hdr *l4; - void *data; - - pktgen__init(&builder, ctx); - - l3 = pktgen__push_ipv4_packet(&builder, (__u8 *)mac_one, (__u8 *)mac_two, - v4_pod_one, v4_pod_two); - if (!l3) - return TEST_ERROR; - l4 = pktgen__push_default_esphdr(&builder); - if (!l4) - return TEST_ERROR; - l4->spi = ENCRYPT_KEY; - l4->seq_no = ESP_SEQUENCE; - - data = pktgen__push_data(&builder, default_data, sizeof(default_data)); - if (!data) - return TEST_ERROR; - - pktgen__finish(&builder); - return 0; -} - -SETUP("tc", "ipv4_ipsec_from_host") -int ipv4_ipsec_from_host_setup(struct __ctx_buff *ctx) -{ - /* This is the ipcache entry for the CiliumInternalIP of the remote node. - * It allows us to lookup the tunnel endpoint from the outer destination IP - * address of the ESP packet. The CiliumInternalIPs are used for that outer - * header. 
- */ - ipcache_v4_add_entry(v4_pod_two, 0, 233, v4_node_two, 0); - - ctx->mark = ipsec_encode_encryption_mark(ENCRYPT_KEY, NODE_ID); - set_identity_meta(ctx, SECLABEL_IPV4); - tail_call_static(ctx, entry_call_map, FROM_HOST); - return TEST_ERROR; -} - -CHECK("tc", "ipv4_ipsec_from_host") -int ipv4_ipsec_from_host_check(__maybe_unused const struct __ctx_buff *ctx) -{ - void *data; - void *data_end; - __u32 *status_code; - struct ethhdr *l2; - struct iphdr *l3; - struct ip_esp_hdr *l4; - __u8 *payload; - - test_init(); - - data = (void *)(long)ctx->data; - data_end = (void *)(long)ctx->data_end; - - if (data + sizeof(*status_code) > data_end) - test_fatal("status code out of bounds"); - - status_code = data; - assert(*status_code == EXPECTED_STATUS_CODE); - - assert(ctx->mark == 0); - - l2 = data + sizeof(*status_code); - - if ((void *)l2 + sizeof(struct ethhdr) > data_end) - test_fatal("l2 out of bounds"); - - if (l2->h_proto != bpf_htons(ETH_P_IP)) - test_fatal("l2 proto hasn't been set to ETH_P_IP"); - - if (memcmp(l2->h_source, (__u8 *)mac_one, ETH_ALEN) != 0) - test_fatal("src mac hasn't been set to source ep's mac"); - - if (memcmp(l2->h_dest, (__u8 *)mac_two, ETH_ALEN) != 0) - test_fatal("dest mac hasn't been set to dest ep's mac"); - - l3 = (void *)l2 + sizeof(struct ethhdr); - - if ((void *)l3 + sizeof(struct iphdr) > data_end) - test_fatal("l3 out of bounds"); - - if (l3->saddr != v4_pod_one) - test_fatal("src IP was changed"); - - if (l3->daddr != v4_pod_two) - test_fatal("dest IP was changed"); - - if (l3->check != bpf_htons(0xf948)) - test_fatal("L3 checksum is invalid: %x", bpf_htons(l3->check)); - - l4 = (void *)l3 + sizeof(struct iphdr); - - if ((void *)l4 + sizeof(struct ip_esp_hdr) > data_end) - test_fatal("l4 out of bounds"); - - if (l4->spi != ENCRYPT_KEY) - test_fatal("ESP spi was changed"); - - if (l4->seq_no != ESP_SEQUENCE) - test_fatal("ESP seq was changed"); - - payload = (void *)l4 + sizeof(struct ip_esp_hdr); - if ((void *)payload + sizeof(default_data) > data_end) - test_fatal("paylaod out of bounds\n"); - - if (memcmp(payload, default_data, sizeof(default_data)) != 0) - test_fatal("tcp payload was changed"); - - test_finish(); -} - -PKTGEN("tc", "ipv6_ipsec_from_host") -int ipv6_ipsec_from_host_pktgen(struct __ctx_buff *ctx) -{ - struct pktgen builder; - struct ipv6hdr *l3; - struct ip_esp_hdr *l4; - void *data; - - pktgen__init(&builder, ctx); - - l3 = pktgen__push_ipv6_packet(&builder, (__u8 *)mac_one, (__u8 *)mac_two, - (__u8 *)&v6_pod_one, (__u8 *)&v6_pod_two); - if (!l3) - return TEST_ERROR; - - l4 = pktgen__push_default_esphdr(&builder); - if (!l4) - return TEST_ERROR; - l4->spi = ENCRYPT_KEY; - l4->seq_no = ESP_SEQUENCE; - - data = pktgen__push_data(&builder, default_data, sizeof(default_data)); - if (!data) - return TEST_ERROR; - - pktgen__finish(&builder); - return 0; -} - -SETUP("tc", "ipv6_ipsec_from_host") -int ipv6_ipsec_from_host_setup(struct __ctx_buff *ctx) -{ - /* See comment for IPv4 counterpart. 
*/ - ipcache_v6_add_entry((union v6addr *)v6_pod_two, 0, 233, v4_node_two, 0); - - ctx->mark = ipsec_encode_encryption_mark(ENCRYPT_KEY, NODE_ID); - set_identity_meta(ctx, SECLABEL_IPV6); - tail_call_static(ctx, entry_call_map, FROM_HOST); - return TEST_ERROR; -} - -CHECK("tc", "ipv6_ipsec_from_host") -int ipv6_ipsec_from_host_check(__maybe_unused const struct __ctx_buff *ctx) -{ - void *data; - void *data_end; - __u32 *status_code; - struct ethhdr *l2; - struct ipv6hdr *l3; - struct ip_esp_hdr *l4; - __u8 *payload; - - test_init(); - - data = (void *)(long)ctx->data; - data_end = (void *)(long)ctx->data_end; - - if (data + sizeof(*status_code) > data_end) - test_fatal("status code out of bounds"); - - status_code = data; - assert(*status_code == EXPECTED_STATUS_CODE); - - assert(ctx->mark == 0); - - l2 = data + sizeof(*status_code); - - if ((void *)l2 + sizeof(struct ethhdr) > data_end) - test_fatal("l2 out of bounds"); - - if (l2->h_proto != bpf_htons(ETH_P_IPV6)) - test_fatal("l2 proto hasn't been set to ETH_P_IP"); - - if (memcmp(l2->h_source, (__u8 *)mac_one, ETH_ALEN) != 0) - test_fatal("src mac hasn't been set to source ep's mac"); - - if (memcmp(l2->h_dest, (__u8 *)mac_two, ETH_ALEN) != 0) - test_fatal("dest mac hasn't been set to dest ep's mac"); - - l3 = (void *)l2 + sizeof(struct ethhdr); - - if ((void *)l3 + sizeof(struct ipv6hdr) > data_end) - test_fatal("l3 out of bounds"); - - if (memcmp((__u8 *)&l3->saddr, (__u8 *)v6_pod_one, 16) != 0) - test_fatal("src IP was changed"); - - if (memcmp((__u8 *)&l3->daddr, (__u8 *)v6_pod_two, 16) != 0) - test_fatal("dest IP was changed"); - - l4 = (void *)l3 + sizeof(struct ipv6hdr); - - if ((void *)l4 + sizeof(struct ip_esp_hdr) > data_end) - test_fatal("l4 out of bounds"); - - if (l4->spi != ENCRYPT_KEY) - test_fatal("ESP spi was changed"); - - if (l4->seq_no != ESP_SEQUENCE) - test_fatal("ESP seq was changed"); - - payload = (void *)l4 + sizeof(struct ip_esp_hdr); - if ((void *)payload + sizeof(default_data) > data_end) - test_fatal("paylaod out of bounds\n"); - - if (memcmp(payload, default_data, sizeof(default_data)) != 0) - test_fatal("tcp payload was changed"); - - test_finish(); -} diff --git a/bpf/tests/ipsec_from_host_native.c b/bpf/tests/ipsec_from_host_native.c deleted file mode 100644 index b46a169934501..0000000000000 --- a/bpf/tests/ipsec_from_host_native.c +++ /dev/null @@ -1,8 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) -/* Copyright Authors of Cilium */ - -#define ENABLE_ROUTING - -#define EXPECTED_STATUS_CODE CTX_ACT_OK - -#include "ipsec_from_host_generic.h" diff --git a/bpf/tests/ipsec_from_host_native_endpoint.c b/bpf/tests/ipsec_from_host_native_endpoint.c deleted file mode 100644 index cbbcbae107813..0000000000000 --- a/bpf/tests/ipsec_from_host_native_endpoint.c +++ /dev/null @@ -1,8 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) -/* Copyright Authors of Cilium */ - -#define ENABLE_ENDPOINT_ROUTES 1 - -#define EXPECTED_STATUS_CODE CTX_ACT_OK - -#include "ipsec_from_host_generic.h" diff --git a/bpf/tests/ipsec_from_host_tunnel.c b/bpf/tests/ipsec_from_host_tunnel.c deleted file mode 100644 index d6a1b6c04e1e7..0000000000000 --- a/bpf/tests/ipsec_from_host_tunnel.c +++ /dev/null @@ -1,9 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) -/* Copyright Authors of Cilium */ - -#define TUNNEL_MODE -#define ENABLE_ROUTING - -#define EXPECTED_STATUS_CODE CTX_ACT_OK - -#include "ipsec_from_host_generic.h" diff --git a/bpf/tests/ipsec_from_host_tunnel_endpoint.c 
b/bpf/tests/ipsec_from_host_tunnel_endpoint.c deleted file mode 100644 index 36e4336ebbe94..0000000000000 --- a/bpf/tests/ipsec_from_host_tunnel_endpoint.c +++ /dev/null @@ -1,9 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) -/* Copyright Authors of Cilium */ - -#define TUNNEL_MODE -#define ENABLE_ENDPOINT_ROUTES 1 - -#define EXPECTED_STATUS_CODE CTX_ACT_OK - -#include "ipsec_from_host_generic.h" diff --git a/bpf/tests/ipsec_from_overlay_generic.h b/bpf/tests/ipsec_from_overlay_generic.h deleted file mode 100644 index 860e7b52b120c..0000000000000 --- a/bpf/tests/ipsec_from_overlay_generic.h +++ /dev/null @@ -1,529 +0,0 @@ -/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ -/* Copyright Authors of Cilium */ - -#define NODE_ID 2333 -#define ENCRYPT_KEY 3 -#define ENABLE_IPV4 -#define ENABLE_IPV6 -#define ENABLE_IPSEC -#define TUNNEL_MODE -#define ENCAP_IFINDEX 4 -#define DEST_IFINDEX 5 -#define DEST_LXC_ID 200 - -#include -#include "common.h" -#include "pktgen.h" - -#define skb_change_type mock_skb_change_type -int mock_skb_change_type(__maybe_unused struct __sk_buff *skb, __u32 type) -{ - if (type != PACKET_HOST) - return -1; - return 0; -} - -#define skb_get_tunnel_key mock_skb_get_tunnel_key -int mock_skb_get_tunnel_key(__maybe_unused struct __sk_buff *skb, - struct bpf_tunnel_key *to, - __maybe_unused __u32 size, - __maybe_unused __u32 flags) -{ - to->remote_ipv4 = v4_node_one; - /* 0xfffff is the default SECLABEL */ - to->tunnel_id = 0xfffff; - return 0; -} - -__section_entry -int mock_handle_policy(struct __ctx_buff *ctx __maybe_unused) -{ - /* https://github.com/cilium/cilium/blob/v1.16.0-pre.1/bpf/bpf_lxc.c#L2040 */ -#if defined(ENABLE_ENDPOINT_ROUTES) && !defined(ENABLE_NODEPORT) - return TC_ACT_OK; -#else - return TC_ACT_REDIRECT; -#endif -} - -struct { - __uint(type, BPF_MAP_TYPE_PROG_ARRAY); - __uint(key_size, sizeof(__u32)); - __uint(max_entries, 256); - __array(values, int()); -} mock_policy_call_map __section(".maps") = { - .values = { - [DEST_LXC_ID] = &mock_handle_policy, - }, -}; - -#define tail_call_dynamic mock_tail_call_dynamic -static __always_inline __maybe_unused void -mock_tail_call_dynamic(struct __ctx_buff *ctx __maybe_unused, - const void *map __maybe_unused, __u32 slot __maybe_unused) -{ - tail_call(ctx, &mock_policy_call_map, slot); -} - -static volatile const __u8 *DEST_EP_MAC = mac_three; -static volatile const __u8 *DEST_NODE_MAC = mac_four; - -#include "bpf_overlay.c" - -#include "lib/endpoint.h" -#include "lib/ipcache.h" -#include "lib/node.h" - -#define FROM_OVERLAY 0 -#define ESP_SEQUENCE 69865 - -struct { - __uint(type, BPF_MAP_TYPE_PROG_ARRAY); - __uint(key_size, sizeof(__u32)); - __uint(max_entries, 1); - __array(values, int()); -} entry_call_map __section(".maps") = { - .values = { - [FROM_OVERLAY] = &cil_from_overlay, - }, -}; - -PKTGEN("tc", "ipv4_not_decrypted_ipsec_from_overlay") -int ipv4_not_decrypted_ipsec_from_overlay_pktgen(struct __ctx_buff *ctx) -{ - struct pktgen builder; - struct iphdr *l3; - struct ip_esp_hdr *l4; - void *data; - - pktgen__init(&builder, ctx); - - l3 = pktgen__push_ipv4_packet(&builder, (__u8 *)mac_one, (__u8 *)mac_two, - v4_pod_one, v4_pod_two); - if (!l3) - return TEST_ERROR; - - l4 = pktgen__push_default_esphdr(&builder); - if (!l4) - return TEST_ERROR; - l4->spi = ENCRYPT_KEY; - l4->seq_no = ESP_SEQUENCE; - - data = pktgen__push_data(&builder, default_data, sizeof(default_data)); - if (!data) - return TEST_ERROR; - - pktgen__finish(&builder); - return 0; -} - -SETUP("tc", 
"ipv4_not_decrypted_ipsec_from_overlay") -int ipv4_not_decrypted_ipsec_from_overlay_setup(struct __ctx_buff *ctx) -{ - /* We need to populate the node ID map because we'll lookup into it on - * ingress to find the node ID to use to match against XFRM IN states. - */ - node_v4_add_entry(v4_pod_one, NODE_ID, ENCRYPT_KEY); - - tail_call_static(ctx, entry_call_map, FROM_OVERLAY); - return TEST_ERROR; -} - -CHECK("tc", "ipv4_not_decrypted_ipsec_from_overlay") -int ipv4_not_decrypted_ipsec_from_overlay_check(__maybe_unused const struct __ctx_buff *ctx) -{ - void *data; - void *data_end; - __u32 *status_code; - struct ethhdr *l2; - struct iphdr *l3; - struct ip_esp_hdr *l4; - __u8 *payload; - - test_init(); - - data = (void *)(long)ctx->data; - data_end = (void *)(long)ctx->data_end; - - if (data + sizeof(*status_code) > data_end) - test_fatal("status code out of bounds"); - - status_code = data; - assert(*status_code == CTX_ACT_OK); - assert(ctx->mark == (MARK_MAGIC_DECRYPT | NODE_ID << 16)); - - l2 = data + sizeof(*status_code); - - if ((void *)l2 + sizeof(struct ethhdr) > data_end) - test_fatal("l2 out of bounds"); - - if (l2->h_proto != bpf_htons(ETH_P_IP)) - test_fatal("l2 proto hasn't been set to ETH_P_IP"); - - if (memcmp(l2->h_source, (__u8 *)mac_one, ETH_ALEN) != 0) - test_fatal("src mac hasn't been set to source ep's mac"); - - if (memcmp(l2->h_dest, (__u8 *)mac_two, ETH_ALEN) != 0) - test_fatal("dest mac hasn't been set to dest ep's mac"); - - l3 = (void *)l2 + sizeof(struct ethhdr); - - if ((void *)l3 + sizeof(struct iphdr) > data_end) - test_fatal("l3 out of bounds"); - - if (l3->saddr != v4_pod_one) - test_fatal("src IP was changed"); - - if (l3->daddr != v4_pod_two) - test_fatal("dest IP was changed"); - - if (l3->check != bpf_htons(0xf948)) - test_fatal("L3 checksum is invalid: %x", bpf_htons(l3->check)); - - l4 = (void *)l3 + sizeof(struct iphdr); - - if ((void *)l4 + sizeof(struct ip_esp_hdr) > data_end) - test_fatal("l4 out of bounds"); - - if (l4->spi != ENCRYPT_KEY) - test_fatal("ESP spi was changed"); - - if (l4->seq_no != ESP_SEQUENCE) - test_fatal("ESP seq was changed"); - - payload = (void *)l4 + sizeof(struct ip_esp_hdr); - if ((void *)payload + sizeof(default_data) > data_end) - test_fatal("paylaod out of bounds\n"); - - if (memcmp(payload, default_data, sizeof(default_data)) != 0) - test_fatal("tcp payload was changed"); - - test_finish(); -} - -PKTGEN("tc", "ipv6_not_decrypted_ipsec_from_overlay") -int ipv6_not_decrypted_ipsec_from_overlay_pktgen(struct __ctx_buff *ctx) -{ - struct pktgen builder; - struct ipv6hdr *l3; - struct ip_esp_hdr *l4; - void *data; - - pktgen__init(&builder, ctx); - - l3 = pktgen__push_ipv6_packet(&builder, (__u8 *)mac_one, (__u8 *)mac_two, - (__u8 *)v6_pod_one, (__u8 *)v6_pod_two); - if (!l3) - return TEST_ERROR; - - l4 = pktgen__push_default_esphdr(&builder); - if (!l4) - return TEST_ERROR; - l4->spi = ENCRYPT_KEY; - l4->seq_no = ESP_SEQUENCE; - - data = pktgen__push_data(&builder, default_data, sizeof(default_data)); - if (!data) - return TEST_ERROR; - - pktgen__finish(&builder); - return 0; -} - -SETUP("tc", "ipv6_not_decrypted_ipsec_from_overlay") -int ipv6_not_decrypted_ipsec_from_overlay_setup(struct __ctx_buff *ctx) -{ - /* We need to populate the node ID map because we'll lookup into it on - * ingress to find the node ID to use to match against XFRM IN states. 
- */ - node_v6_add_entry((union v6addr *)v6_pod_one, NODE_ID, ENCRYPT_KEY); - - tail_call_static(ctx, entry_call_map, FROM_OVERLAY); - return TEST_ERROR; -} - -CHECK("tc", "ipv6_not_decrypted_ipsec_from_overlay") -int ipv6_not_decrypted_ipsec_from_overlay_check(__maybe_unused const struct __ctx_buff *ctx) -{ - void *data; - void *data_end; - __u32 *status_code; - struct ethhdr *l2; - struct ipv6hdr *l3; - struct ip_esp_hdr *l4; - __u8 *payload; - - test_init(); - - data = (void *)(long)ctx->data; - data_end = (void *)(long)ctx->data_end; - - if (data + sizeof(*status_code) > data_end) - test_fatal("status code out of bounds"); - - status_code = data; - assert(*status_code == CTX_ACT_OK); - assert(ctx->mark == (MARK_MAGIC_DECRYPT | NODE_ID << 16)); - - l2 = data + sizeof(*status_code); - - if ((void *)l2 + sizeof(struct ethhdr) > data_end) - test_fatal("l2 out of bounds"); - - if (l2->h_proto != bpf_htons(ETH_P_IPV6)) - test_fatal("l2 proto hasn't been set to ETH_P_IP"); - - if (memcmp(l2->h_source, (__u8 *)mac_one, ETH_ALEN) != 0) - test_fatal("src mac hasn't been set to source ep's mac"); - - if (memcmp(l2->h_dest, (__u8 *)mac_two, ETH_ALEN) != 0) - test_fatal("dest mac hasn't been set to dest ep's mac"); - - l3 = (void *)l2 + sizeof(struct ethhdr); - - if ((void *)l3 + sizeof(struct ipv6hdr) > data_end) - test_fatal("l3 out of bounds"); - - if (memcmp((__u8 *)&l3->saddr, (__u8 *)v6_pod_one, 16) != 0) - test_fatal("src IP was changed"); - - if (memcmp((__u8 *)&l3->daddr, (__u8 *)v6_pod_two, 16) != 0) - test_fatal("dest IP was changed"); - - l4 = (void *)l3 + sizeof(struct ipv6hdr); - - if ((void *)l4 + sizeof(struct ip_esp_hdr) > data_end) - test_fatal("l4 out of bounds"); - - if (l4->spi != ENCRYPT_KEY) - test_fatal("ESP spi was changed"); - - if (l4->seq_no != ESP_SEQUENCE) - test_fatal("ESP seq was changed"); - - payload = (void *)l4 + sizeof(struct ip_esp_hdr); - if ((void *)payload + sizeof(default_data) > data_end) - test_fatal("paylaod out of bounds\n"); - - if (memcmp(payload, default_data, sizeof(default_data)) != 0) - test_fatal("tcp payload was changed"); - - test_finish(); -} - -PKTGEN("tc", "ipv4_decrypted_ipsec_from_overlay") -int ipv4_decrypted_ipsec_from_overlay_pktgen(struct __ctx_buff *ctx) -{ - struct pktgen builder; - struct tcphdr *l4; - void *data; - - pktgen__init(&builder, ctx); - - l4 = pktgen__push_ipv4_tcp_packet(&builder, - (__u8 *)mac_one, (__u8 *)mac_two, - v4_pod_one, v4_pod_two, - tcp_src_one, tcp_svc_one); - if (!l4) - return TEST_ERROR; - - data = pktgen__push_data(&builder, default_data, sizeof(default_data)); - if (!data) - return TEST_ERROR; - - pktgen__finish(&builder); - return 0; -} - -SETUP("tc", "ipv4_decrypted_ipsec_from_overlay") -int ipv4_decrypted_ipsec_from_overlay_setup(struct __ctx_buff *ctx) -{ - endpoint_v4_add_entry(v4_pod_two, DEST_IFINDEX, DEST_LXC_ID, 0, 0, 0, - (__u8 *)DEST_EP_MAC, (__u8 *)DEST_NODE_MAC); - - ctx->mark = MARK_MAGIC_DECRYPT; - tail_call_static(ctx, entry_call_map, FROM_OVERLAY); - return TEST_ERROR; -} - -CHECK("tc", "ipv4_decrypted_ipsec_from_overlay") -int ipv4_decrypted_ipsec_from_overlay_check(__maybe_unused const struct __ctx_buff *ctx) -{ - void *data; - void *data_end; - __u32 *status_code; - struct ethhdr *l2; - struct iphdr *l3; - struct tcphdr *l4; - __u8 *payload; - - test_init(); - - data = (void *)(long)ctx->data; - data_end = (void *)(long)ctx->data_end; - - if (data + sizeof(*status_code) > data_end) - test_fatal("status code out of bounds"); - - status_code = data; - assert(*status_code == 
EXPECTED_STATUS_CODE_FOR_DECRYPTED); - assert(ctx->mark == 0); - - l2 = data + sizeof(*status_code); - - if ((void *)l2 + sizeof(struct ethhdr) > data_end) - test_fatal("l2 out of bounds"); - - if (l2->h_proto != bpf_htons(ETH_P_IP)) - test_fatal("l2 proto hasn't been set to ETH_P_IP"); - - if (memcmp(l2->h_source, (__u8 *)DEST_NODE_MAC, ETH_ALEN) != 0) - test_fatal("src mac hasn't been set to source ep's mac"); - - if (memcmp(l2->h_dest, (__u8 *)DEST_EP_MAC, ETH_ALEN) != 0) - test_fatal("dest mac hasn't been set to dest ep's mac"); - - l3 = (void *)l2 + sizeof(struct ethhdr); - - if ((void *)l3 + sizeof(struct iphdr) > data_end) - test_fatal("l3 out of bounds"); - - if (l3->saddr != v4_pod_one) - test_fatal("src IP was changed"); - - if (l3->daddr != v4_pod_two) - test_fatal("dest IP was changed"); - - if (l3->check != bpf_htons(0xfa68)) - test_fatal("L3 checksum is invalid: %x", bpf_htons(l3->check)); - - l4 = (void *)l3 + sizeof(struct iphdr); - - if ((void *)l4 + sizeof(struct tcphdr) > data_end) - test_fatal("l4 out of bounds"); - - if (l4->source != tcp_src_one) - test_fatal("src TCP port was changed"); - - if (l4->dest != tcp_svc_one) - test_fatal("dst TCP port was changed"); - - if (l4->check != bpf_htons(0x589c)) - test_fatal("L4 checksum is invalid: %x", bpf_htons(l4->check)); - - payload = (void *)l4 + sizeof(struct tcphdr); - if ((void *)payload + sizeof(default_data) > data_end) - test_fatal("paylaod out of bounds\n"); - - if (memcmp(payload, default_data, sizeof(default_data)) != 0) - test_fatal("tcp payload was changed"); - - test_finish(); -} - -PKTGEN("tc", "ipv6_decrypted_ipsec_from_overlay") -int ipv6_decrypted_ipsec_from_overlay_pktgen(struct __ctx_buff *ctx) -{ - struct pktgen builder; - struct tcphdr *l4; - void *data; - - pktgen__init(&builder, ctx); - - l4 = pktgen__push_ipv6_tcp_packet(&builder, - (__u8 *)mac_one, (__u8 *)mac_two, - (__u8 *)v6_pod_one, (__u8 *)v6_pod_two, - tcp_src_one, tcp_svc_one); - if (!l4) - return TEST_ERROR; - - data = pktgen__push_data(&builder, default_data, sizeof(default_data)); - if (!data) - return TEST_ERROR; - - pktgen__finish(&builder); - return 0; -} - -SETUP("tc", "ipv6_decrypted_ipsec_from_overlay") -int ipv6_decrypted_ipsec_from_overlay_setup(struct __ctx_buff *ctx) -{ - endpoint_v6_add_entry((union v6addr *)v6_pod_two, DEST_IFINDEX, DEST_LXC_ID, - 0, 0, (__u8 *)DEST_EP_MAC, (__u8 *)DEST_NODE_MAC); - - ctx->mark = MARK_MAGIC_DECRYPT; - tail_call_static(ctx, entry_call_map, FROM_OVERLAY); - return TEST_ERROR; -} - -CHECK("tc", "ipv6_decrypted_ipsec_from_overlay") -int ipv6_decrypted_ipsec_from_overlay_check(__maybe_unused const struct __ctx_buff *ctx) -{ - void *data; - void *data_end; - __u32 *status_code; - struct ethhdr *l2; - struct ipv6hdr *l3; - struct tcphdr *l4; - __u8 *payload; - - test_init(); - - data = (void *)(long)ctx->data; - data_end = (void *)(long)ctx->data_end; - - if (data + sizeof(*status_code) > data_end) - test_fatal("status code out of bounds"); - - status_code = data; - assert(*status_code == EXPECTED_STATUS_CODE_FOR_DECRYPTED); - assert(ctx->mark == 0); - - l2 = data + sizeof(*status_code); - - if ((void *)l2 + sizeof(struct ethhdr) > data_end) - test_fatal("l2 out of bounds"); - - if (l2->h_proto != bpf_htons(ETH_P_IPV6)) - test_fatal("l2 proto hasn't been set to ETH_P_IP"); - - if (memcmp(l2->h_source, (__u8 *)DEST_NODE_MAC, ETH_ALEN) != 0) - test_fatal("src mac hasn't been set to source ep's mac"); - - if (memcmp(l2->h_dest, (__u8 *)DEST_EP_MAC, ETH_ALEN) != 0) - test_fatal("dest mac hasn't been set 
to dest ep's mac"); - - l3 = (void *)l2 + sizeof(struct ethhdr); - - if ((void *)l3 + sizeof(struct ipv6hdr) > data_end) - test_fatal("l3 out of bounds"); - - if (memcmp((__u8 *)&l3->saddr, (__u8 *)v6_pod_one, 16) != 0) - test_fatal("src IP was changed"); - - if (memcmp((__u8 *)&l3->daddr, (__u8 *)v6_pod_two, 16) != 0) - test_fatal("dest IP was changed"); - - l4 = (void *)l3 + sizeof(struct ipv6hdr); - - if ((void *)l4 + sizeof(struct tcphdr) > data_end) - test_fatal("l4 out of bounds"); - - if (l4->source != tcp_src_one) - test_fatal("src TCP port was changed"); - - if (l4->dest != tcp_svc_one) - test_fatal("dst TCP port was changed"); - - if (l4->check != bpf_htons(0xdfe3)) - test_fatal("L4 checksum is invalid: %x", bpf_htons(l4->check)); - - payload = (void *)l4 + sizeof(struct tcphdr); - if ((void *)payload + sizeof(default_data) > data_end) - test_fatal("paylaod out of bounds\n"); - - if (memcmp(payload, default_data, sizeof(default_data)) != 0) - test_fatal("tcp payload was changed"); - - test_finish(); -} diff --git a/bpf/tests/ipsec_from_overlay_tunnel.c b/bpf/tests/ipsec_from_overlay_tunnel.c deleted file mode 100644 index 7099e1a9bd0d7..0000000000000 --- a/bpf/tests/ipsec_from_overlay_tunnel.c +++ /dev/null @@ -1,6 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) -/* Copyright Authors of Cilium */ - -#define EXPECTED_STATUS_CODE_FOR_DECRYPTED TC_ACT_REDIRECT - -#include "ipsec_from_overlay_generic.h" diff --git a/bpf/tests/ipsec_from_overlay_tunnel_endpoint.c b/bpf/tests/ipsec_from_overlay_tunnel_endpoint.c deleted file mode 100644 index 63b74ef967191..0000000000000 --- a/bpf/tests/ipsec_from_overlay_tunnel_endpoint.c +++ /dev/null @@ -1,8 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) -/* Copyright Authors of Cilium */ - -#define ENABLE_ENDPOINT_ROUTES 1 - -#define EXPECTED_STATUS_CODE_FOR_DECRYPTED TC_ACT_OK - -#include "ipsec_from_overlay_generic.h" diff --git a/bpf/tests/ipsec_redirect_tunnel.c b/bpf/tests/ipsec_redirect_tunnel.c index 6182ff5c2b122..0416e58900657 100644 --- a/bpf/tests/ipsec_redirect_tunnel.c +++ b/bpf/tests/ipsec_redirect_tunnel.c @@ -9,8 +9,7 @@ /* must define `HAVE_ENCAP 1` before including 'lib/encrypt.h'. * lib/encrypt.h eventually imports overloadable_skb.h which exposes - * ctx_is_overlay and ctx_is_overlay_encrypted, utilized within - * 'ipsec_maybe_redirect_to_encrypt' + * ctx_is_overlay, utilized within 'ipsec_maybe_redirect_to_encrypt' */ #define HAVE_ENCAP 1 #include "lib/encrypt.h" @@ -35,15 +34,6 @@ int ipsec_redirect_checks(__maybe_unused struct __ctx_buff *ctx, bool outer_ipv4 }; map_update_elem(&cilium_encrypt_state, &ret, &cfg, BPF_ANY); - /* Ensure we simply return from 'ipsec_maybe_redirect_to_encrypt' if - * the 'MARK_MAGIC_OVERLAY_ENCRYPTED' mark is set. - */ - TEST("overlay-encrypted-mark-set", { - ctx->mark = MARK_MAGIC_OVERLAY_ENCRYPTED; - ret = ipsec_maybe_redirect_to_encrypt(ctx, proto, SOURCE_IDENTITY); - assert(ret == CTX_ACT_OK); - }) - /* * Ensure encryption mark is set for overlay traffic with source * identity pod SOURCE_IDENTITY and CTX_ACT_REDIRECT is set. 
diff --git a/bpf/tests/ipv6_ndp_from_netdev_test.c b/bpf/tests/ipv6_ndp_from_netdev_test.c index 1cdc515fb380f..9689b85140ed6 100644 --- a/bpf/tests/ipv6_ndp_from_netdev_test.c +++ b/bpf/tests/ipv6_ndp_from_netdev_test.c @@ -13,8 +13,12 @@ #include "lib/ipcache.h" #include "lib/endpoint.h" +#include "scapy.h" + #define FROM_NETDEV 0 +ASSIGN_CONFIG(union macaddr, interface_mac, {.addr = mac_two_addr}) + struct { __uint(type, BPF_MAP_TYPE_PROG_ARRAY); __uint(key_size, sizeof(__u32)); @@ -45,113 +49,24 @@ struct test_args { } __packed icmp_opt; }; -/* Generics */ -static __always_inline -int __ipv6_from_netdev_ns_pktgen(struct __ctx_buff *ctx, - struct test_args *args) -{ - struct pktgen builder; - struct icmp6hdr *l4; - void *data; - - pktgen__init(&builder, ctx); - - l4 = pktgen__push_ipv6_icmp6_packet(&builder, args->mac_src, - args->mac_dst, - (__u8 *)&args->ip_src, - (__u8 *)&args->ip_dst, - ICMP6_NS_MSG_TYPE); - if (!l4) - return TEST_ERROR; - - data = pktgen__push_data(&builder, (__u8 *)&args->icmp_ns_addr, - IPV6_ALEN); - if (!data) - return TEST_ERROR; - - if (args->llsrc_opt) { - data = pktgen__push_data(&builder, (__u8 *)&args->icmp_opt, - ICMP6_ND_OPT_LEN); - } - - pktgen__finish(&builder); - return 0; -} - +/* + * Generic + */ static __always_inline -int __ipv6_from_netdev_ns_check(const struct __ctx_buff *ctx, - struct test_args *args) +bool __check_ret_code(const struct __ctx_buff *ctx, const __u32 exp_rc) { void *data; void *data_end; __u32 *status_code; - struct ethhdr *l2; - struct ipv6hdr *l3; - struct icmp6hdr *l4; - void *target_addr, *opt; - - test_init(); data = (void *)(long)ctx->data; data_end = (void *)(long)ctx->data_end; if (data + sizeof(*status_code) > data_end) - test_fatal("status code out of bounds"); + return false; status_code = data; - assert(*status_code == args->status_code); - - l2 = data + sizeof(*status_code); - - if ((void *)l2 + sizeof(struct ethhdr) > data_end) - test_fatal("l2 out of bounds"); - - if (l2->h_proto != bpf_htons(ETH_P_IPV6)) - test_fatal("l2 proto hasn't been set to ETH_P_IPV6"); - - if (memcmp(l2->h_source, (__u8 *)args->mac_src, ETH_ALEN) != 0) - test_fatal("Incorrect mac_src"); - - if (memcmp(l2->h_dest, (__u8 *)args->mac_dst, ETH_ALEN) != 0) - test_fatal("Incorrect mac_dst"); - - l3 = (void *)l2 + sizeof(struct ethhdr); - - if ((void *)l3 + sizeof(struct ipv6hdr) > data_end) - test_fatal("l3 out of bounds"); - - if (memcmp((__u8 *)&l3->saddr, (__u8 *)&args->ip_src, IPV6_ALEN) != 0) - test_fatal("Incorrect ip_src"); - - if (memcmp((__u8 *)&l3->daddr, (__u8 *)&args->ip_dst, IPV6_ALEN) != 0) - test_fatal("Incorrect ip_dst"); - - l4 = (void *)l3 + sizeof(struct ipv6hdr); - - if ((void *)l4 + sizeof(struct icmp6hdr) > data_end) - test_fatal("l4 out of bounds"); - - if (l4->icmp6_type != args->icmp_type) - test_fatal("Invalid ICMP type"); - - target_addr = (void *)l4 + sizeof(struct icmp6hdr); - if ((void *)target_addr + IPV6_ALEN > data_end) - test_fatal("Target addr out of bounds"); - - if (memcmp(target_addr, (__u8 *)&args->icmp_ns_addr, IPV6_ALEN) != 0) - test_fatal("Incorrect icmp6 payload target addr"); - - if (args->llsrc_opt) { - opt = target_addr + IPV6_ALEN; - - if ((void *)opt + ICMP6_ND_OPT_LEN > data_end) - test_fatal("llsrc_opt out of bounds"); - - if (memcmp(opt, (__u8 *)&args->icmp_opt, ICMP6_ND_OPT_LEN) != 0) - test_fatal("Incorrect icmp6 payload type/length or target_lladdr"); - } - - test_finish(); + return *status_code == exp_rc; } /* @@ -168,56 +83,18 @@ int __ipv6_from_netdev_ns_pod_setup(struct __ctx_buff *ctx) 
return TEST_ERROR; } -static __always_inline -void __ipv6_from_netdev_ns_pod_pktgen_args(struct test_args *args, - bool llsrc_opt) -{ - __u8 llsrc_mac[] = {0x1, 0x1, 0x1, 0x1, 0x1, 0x1}; - - memcpy((__u8 *)args->mac_src, (__u8 *)mac_one, ETH_ALEN); - memcpy((__u8 *)args->mac_dst, (__u8 *)mac_two, ETH_ALEN); - - memcpy((__u8 *)&args->ip_src, (__u8 *)v6_pod_one, IPV6_ALEN); - memcpy((__u8 *)&args->ip_dst, (__u8 *)v6_pod_two, IPV6_ALEN); - - memcpy((__u8 *)&args->icmp_ns_addr, (__u8 *)v6_pod_three, IPV6_ALEN); - - args->llsrc_opt = llsrc_opt; - args->icmp_opt.type = 0x1; - args->icmp_opt.length = 0x1; - memcpy((__u8 *)args->icmp_opt.llsrc_mac, (__u8 *)llsrc_mac, ETH_ALEN); -} - -static __always_inline -void __ipv6_from_netdev_ns_pod_check_args(struct test_args *args, - bool llsrc_opt) -{ - union macaddr node_mac = THIS_INTERFACE_MAC; - - args->status_code = CTX_ACT_REDIRECT; - - memcpy((__u8 *)args->mac_src, (__u8 *)&node_mac.addr, ETH_ALEN); - memcpy((__u8 *)args->mac_dst, (__u8 *)mac_one, ETH_ALEN); - - memcpy((__u8 *)&args->ip_src, (__u8 *)v6_pod_three, IPV6_ALEN); - memcpy((__u8 *)&args->ip_dst, (__u8 *)v6_pod_one, IPV6_ALEN); - - args->icmp_type = ICMP6_NA_MSG_TYPE; - memcpy((__u8 *)&args->icmp_ns_addr, (__u8 *)v6_pod_three, IPV6_ALEN); - - args->llsrc_opt = llsrc_opt; - args->icmp_opt.type = 0x2; - args->icmp_opt.length = 0x1; - memcpy((__u8 *)args->icmp_opt.llsrc_mac, (__u8 *)&node_mac, ETH_ALEN); -} - PKTGEN("tc", "011_ipv6_from_netdev_ns_pod") int ipv6_from_netdev_ns_pod_pktgen(struct __ctx_buff *ctx) { - struct test_args args = {0}; + struct pktgen builder; + + pktgen__init(&builder, ctx); - __ipv6_from_netdev_ns_pod_pktgen_args(&args, true); - return __ipv6_from_netdev_ns_pktgen(ctx, &args); + BUF_DECL(V6_NDP_POD_NS_LLOPT, v6_ndp_pod_ns_llopt); + BUILDER_PUSH_BUF(builder, V6_NDP_POD_NS_LLOPT); + + pktgen__finish(&builder); + return 0; } SETUP("tc", "011_ipv6_from_netdev_ns_pod") @@ -229,19 +106,33 @@ int ipv6_from_netdev_ns_pod_setup(struct __ctx_buff *ctx) CHECK("tc", "011_ipv6_from_netdev_ns_pod") int ipv6_from_netdev_ns_pod_check(const struct __ctx_buff *ctx) { - struct test_args args = {0}; + test_init(); + + assert(__check_ret_code(ctx, CTX_ACT_REDIRECT)); + + BUF_DECL(V6_NDP_POD_NA_LLOPT, v6_ndp_pod_na_llopt); - __ipv6_from_netdev_ns_pod_check_args(&args, true); - return __ipv6_from_netdev_ns_check(ctx, &args); + ASSERT_CTX_BUF_OFF("pod_na_ns_llopt_ok", "Ether", ctx, sizeof(__u32), + V6_NDP_POD_NA_LLOPT, + sizeof(BUF(V6_NDP_POD_NA_LLOPT))); + test_finish(); + + return 0; } PKTGEN("tc", "011_ipv6_from_netdev_ns_pod_noopt") int ipv6_from_netdev_ns_pod_pktgen_noopt(struct __ctx_buff *ctx) { - struct test_args args = {0}; + struct pktgen builder; + + pktgen__init(&builder, ctx); - __ipv6_from_netdev_ns_pod_pktgen_args(&args, false); - return __ipv6_from_netdev_ns_pktgen(ctx, &args); + BUF_DECL(V6_NDP_POD_NS, v6_ndp_pod_ns); + BUILDER_PUSH_BUF(builder, V6_NDP_POD_NS); + + pktgen__finish(&builder); + + return 0; } SETUP("tc", "011_ipv6_from_netdev_ns_pod_noopt") @@ -253,10 +144,18 @@ int ipv6_from_netdev_ns_pod_setup_noopt(struct __ctx_buff *ctx) CHECK("tc", "011_ipv6_from_netdev_ns_pod_noopt") int ipv6_from_netdev_ns_pod_check_noopt(const struct __ctx_buff *ctx) { - struct test_args args = {0}; + test_init(); + + assert(__check_ret_code(ctx, CTX_ACT_REDIRECT)); - __ipv6_from_netdev_ns_pod_check_args(&args, false); - return __ipv6_from_netdev_ns_check(ctx, &args); + /* Note we always return NA with llopt */ + BUF_DECL(V6_NDP_POD_NA_LLOPT_NS_NOOPT, v6_ndp_pod_na_llopt); + 
ASSERT_CTX_BUF_OFF("pod_na_ns_noopt_ok", "Ether", ctx, sizeof(__u32), + V6_NDP_POD_NA_LLOPT_NS_NOOPT, + sizeof(BUF(V6_NDP_POD_NA_LLOPT_NS_NOOPT))); + test_finish(); + + return 0; } /* Bcast NS */ @@ -270,37 +169,19 @@ int __ipv6_from_netdev_ns_pod_setup_mcast(struct __ctx_buff *ctx) return TEST_ERROR; } -static __always_inline -void __ipv6_from_netdev_ns_pod_pktgen_mcast_args(struct test_args *args, - bool llsrc_opt) +PKTGEN("tc", "012_ipv6_from_netdev_ns_pod_mcast") +int ipv6_from_netdev_ns_pod_pktgen_mcast(struct __ctx_buff *ctx) { - __u8 llsrc_mac[] = {0x1, 0x1, 0x1, 0x1, 0x1, 0x1}; - - memcpy((__u8 *)args->mac_src, (__u8 *)mac_one, ETH_ALEN); - - ipv6_sol_mc_mac_set((union v6addr *)v6_pod_three, - (union macaddr *)args->mac_dst); - - memcpy((__u8 *)&args->ip_src, (__u8 *)v6_pod_one, IPV6_ALEN); - - ipv6_sol_mc_addr_set((union v6addr *)v6_pod_three, &args->ip_dst); - - memcpy((__u8 *)&args->icmp_ns_addr, (__u8 *)v6_pod_three, IPV6_ALEN); + struct pktgen builder; - args->llsrc_opt = llsrc_opt; - args->icmp_opt.type = 0x1; - args->icmp_opt.length = 0x1; - memcpy((__u8 *)args->icmp_opt.llsrc_mac, (__u8 *)llsrc_mac, ETH_ALEN); -} + pktgen__init(&builder, ctx); + BUF_DECL(V6_NDP_POD_NS_MCAST_LLOPT, v6_ndp_pod_ns_mcast_llopt); + BUILDER_PUSH_BUF(builder, V6_NDP_POD_NS_MCAST_LLOPT); -PKTGEN("tc", "012_ipv6_from_netdev_ns_pod_mcast") -int ipv6_from_netdev_ns_pod_pktgen_mcast(struct __ctx_buff *ctx) -{ - struct test_args args = {0}; + pktgen__finish(&builder); - __ipv6_from_netdev_ns_pod_pktgen_mcast_args(&args, true); - return __ipv6_from_netdev_ns_pktgen(ctx, &args); + return 0; } SETUP("tc", "012_ipv6_from_netdev_ns_pod_mcast") @@ -312,19 +193,33 @@ int ipv6_from_netdev_ns_pod_setup_mcast(struct __ctx_buff *ctx) CHECK("tc", "012_ipv6_from_netdev_ns_pod_mcast") int ipv6_from_netdev_ns_pod_check_mcast(const struct __ctx_buff *ctx) { - struct test_args args = {0}; + test_init(); + + assert(__check_ret_code(ctx, CTX_ACT_REDIRECT)); + + /* Note we always return NA with llopt */ + BUF_DECL(V6_NDP_POD_NA_MCAST_NS_NOOPT, v6_ndp_pod_na_llopt); + ASSERT_CTX_BUF_OFF("pod_na_ns_mcast_ok", "Ether", ctx, sizeof(__u32), + V6_NDP_POD_NA_MCAST_NS_NOOPT, + sizeof(BUF(V6_NDP_POD_NA_MCAST_NS_NOOPT))); + test_finish(); - __ipv6_from_netdev_ns_pod_check_args(&args, true); - return __ipv6_from_netdev_ns_check(ctx, &args); + return 0; } PKTGEN("tc", "012_ipv6_from_netdev_ns_pod_mcast_noopt") int ipv6_from_netdev_ns_pod_pktgen_mcast_noopt(struct __ctx_buff *ctx) { - struct test_args args = {0}; + struct pktgen builder; + + pktgen__init(&builder, ctx); + + BUF_DECL(V6_NDP_POD_NS_MCAST, v6_ndp_pod_ns_mcast); + BUILDER_PUSH_BUF(builder, V6_NDP_POD_NS_MCAST); + + pktgen__finish(&builder); - __ipv6_from_netdev_ns_pod_pktgen_mcast_args(&args, false); - return __ipv6_from_netdev_ns_pktgen(ctx, &args); + return 0; } SETUP("tc", "012_ipv6_from_netdev_ns_pod_mcast_noopt") @@ -336,10 +231,19 @@ int ipv6_from_netdev_ns_pod_setup_mcast_noopt(struct __ctx_buff *ctx) CHECK("tc", "012_ipv6_from_netdev_ns_pod_mcast_noopt") int ipv6_from_netdev_ns_pod_check_mcast_noopt(const struct __ctx_buff *ctx) { - struct test_args args = {0}; + test_init(); + + assert(__check_ret_code(ctx, CTX_ACT_REDIRECT)); + + /* Note we always return NA with llopt */ + BUF_DECL(V6_NDP_POD_NA_MCAST_LLOPT, v6_ndp_pod_na_llopt); + ASSERT_CTX_BUF_OFF("pod_na_ns_mcast_noopt_ok", "Ether", ctx, + sizeof(__u32), + V6_NDP_POD_NA_MCAST_LLOPT, + sizeof(BUF(V6_NDP_POD_NA_MCAST_LLOPT))); + test_finish(); - __ipv6_from_netdev_ns_pod_check_args(&args, false); - return 
__ipv6_from_netdev_ns_check(ctx, &args); + return 0; } /* @@ -357,45 +261,20 @@ int __ipv6_from_netdev_ns_node_ip_setup(struct __ctx_buff *ctx) return TEST_ERROR; } -static __always_inline -void __ipv6_from_netdev_ns_node_ip_pktgen_args(struct test_args *args, - bool llsrc_opt) -{ - __u8 llsrc_mac[] = {0x1, 0x1, 0x1, 0x1, 0x1, 0x1}; - - memcpy((__u8 *)args->mac_src, (__u8 *)mac_one, ETH_ALEN); - memcpy((__u8 *)args->mac_dst, (__u8 *)mac_two, ETH_ALEN); - - memcpy((__u8 *)&args->ip_src, (__u8 *)v6_pod_one, IPV6_ALEN); - memcpy((__u8 *)&args->ip_dst, (__u8 *)v6_pod_two, IPV6_ALEN); - - memcpy((__u8 *)&args->icmp_ns_addr, (__u8 *)&v6_node_one, IPV6_ALEN); - - args->icmp_type = ICMP6_NS_MSG_TYPE; - - args->llsrc_opt = llsrc_opt; - args->icmp_opt.type = 0x1; - args->icmp_opt.length = 0x1; - memcpy((__u8 *)args->icmp_opt.llsrc_mac, (__u8 *)llsrc_mac, ETH_ALEN); -} - -static __always_inline -void __ipv6_from_netdev_ns_node_ip_check_args(struct test_args *args, - bool llsrc_opt) -{ - /* Pkt is unmodified */ - __ipv6_from_netdev_ns_node_ip_pktgen_args(args, llsrc_opt); - args->status_code = CTX_ACT_OK; -} - /* With LL SRC option */ PKTGEN("tc", "0211_ipv6_from_netdev_ns_node_ip") int ipv6_from_netdev_ns_node_ip_pktgen(struct __ctx_buff *ctx) { - struct test_args args = {0}; + struct pktgen builder; + + pktgen__init(&builder, ctx); + + BUF_DECL(V6_NDP_NODE_NS_LLOPT, v6_ndp_node_ns_llopt); + BUILDER_PUSH_BUF(builder, V6_NDP_NODE_NS_LLOPT); - __ipv6_from_netdev_ns_node_ip_pktgen_args(&args, true); - return __ipv6_from_netdev_ns_pktgen(ctx, &args); + pktgen__finish(&builder); + + return 0; } SETUP("tc", "0211_ipv6_from_netdev_ns_node_ip") @@ -407,20 +286,35 @@ int ipv6_from_netdev_ns_node_ip_setup(struct __ctx_buff *ctx) CHECK("tc", "0211_ipv6_from_netdev_ns_node_ip") int ipv6_from_netdev_ns_node_ip_check(const struct __ctx_buff *ctx) { - struct test_args args = {0}; + test_init(); + + assert(__check_ret_code(ctx, CTX_ACT_OK)); - __ipv6_from_netdev_ns_node_ip_check_args(&args, true); - return __ipv6_from_netdev_ns_check(ctx, &args); + /* Packet should not be modified */ + BUF_DECL(V6_NDP_NODE_NS_LLOPT_PASS, v6_ndp_node_ns_llopt); + ASSERT_CTX_BUF_OFF("node_ns_pass", "Ether", ctx, + sizeof(__u32), + V6_NDP_NODE_NS_LLOPT_PASS, + sizeof(BUF(V6_NDP_NODE_NS_LLOPT_PASS))); + test_finish(); + + return 0; } /* Without LL SRC option */ PKTGEN("tc", "0212_ipv6_from_netdev_ns_node_ip_noopt") int ipv6_from_netdev_ns_node_ip_pktgen_noopt(struct __ctx_buff *ctx) { - struct test_args args = {0}; + struct pktgen builder; + + pktgen__init(&builder, ctx); - __ipv6_from_netdev_ns_node_ip_pktgen_args(&args, false); - return __ipv6_from_netdev_ns_pktgen(ctx, &args); + BUF_DECL(V6_NDP_NODE_NS, v6_ndp_node_ns); + BUILDER_PUSH_BUF(builder, V6_NDP_NODE_NS); + + pktgen__finish(&builder); + + return 0; } SETUP("tc", "0212_ipv6_from_netdev_ns_node_ip_noopt") @@ -432,10 +326,19 @@ int ipv6_from_netdev_ns_node_ip_setup_noopt(struct __ctx_buff *ctx) CHECK("tc", "0212_ipv6_from_netdev_ns_node_ip_noopt") int ipv6_from_netdev_ns_node_ip_check_noopt(const struct __ctx_buff *ctx) { - struct test_args args = {0}; + test_init(); + + assert(__check_ret_code(ctx, CTX_ACT_OK)); - __ipv6_from_netdev_ns_node_ip_check_args(&args, false); - return __ipv6_from_netdev_ns_check(ctx, &args); + /* Packet should not be modified */ + BUF_DECL(V6_NDP_NODE_NS_PASS, v6_ndp_node_ns); + ASSERT_CTX_BUF_OFF("node_ns_pass", "Ether", ctx, + sizeof(__u32), + V6_NDP_NODE_NS_PASS, + sizeof(BUF(V6_NDP_NODE_NS_PASS))); + test_finish(); + + return 0; } /* Bcast NS */ 
@@ -449,47 +352,19 @@ int __ipv6_from_netdev_ns_node_ip_setup_mcast(struct __ctx_buff *ctx) return TEST_ERROR; } -static __always_inline -void __ipv6_from_netdev_ns_node_ip_pktgen_mcast_args(struct test_args *args, - bool llsrc_opt) +PKTGEN("tc", "022_ipv6_from_netdev_ns_node_ip_mcast") +int ipv6_from_netdev_ns_node_ip_pktgen_mcast(struct __ctx_buff *ctx) { - __u8 llsrc_mac[] = {0x1, 0x1, 0x1, 0x1, 0x1, 0x1}; - - memcpy((__u8 *)args->mac_src, (__u8 *)mac_one, ETH_ALEN); - - ipv6_sol_mc_mac_set((union v6addr *)v6_pod_one, - (union macaddr *)args->mac_dst); - - memcpy((__u8 *)&args->ip_src, (__u8 *)v6_pod_one, IPV6_ALEN); - - ipv6_sol_mc_addr_set((union v6addr *)v6_pod_one, &args->ip_dst); - - memcpy((__u8 *)&args->icmp_ns_addr, (__u8 *)&v6_node_one, IPV6_ALEN); - - args->icmp_type = ICMP6_NS_MSG_TYPE; + struct pktgen builder; - args->llsrc_opt = llsrc_opt; - args->icmp_opt.type = 0x1; - args->icmp_opt.length = 0x1; - memcpy((__u8 *)args->icmp_opt.llsrc_mac, (__u8 *)llsrc_mac, ETH_ALEN); -} + pktgen__init(&builder, ctx); -static __always_inline -void __ipv6_from_netdev_ns_node_ip_check_mcast_args(struct test_args *args, - bool llsrc_opt) -{ - /* Pkt is unmodified */ - __ipv6_from_netdev_ns_node_ip_pktgen_mcast_args(args, llsrc_opt); - args->status_code = CTX_ACT_OK; -} + BUF_DECL(V6_NDP_NODE_NS_MCAST_LLOPT, v6_ndp_node_ns_mcast_llopt); + BUILDER_PUSH_BUF(builder, V6_NDP_NODE_NS_MCAST_LLOPT); -PKTGEN("tc", "022_ipv6_from_netdev_ns_node_ip_mcast") -int ipv6_from_netdev_ns_node_ip_pktgen_mcast(struct __ctx_buff *ctx) -{ - struct test_args args = {0}; + pktgen__finish(&builder); - __ipv6_from_netdev_ns_node_ip_pktgen_mcast_args(&args, true); - return __ipv6_from_netdev_ns_pktgen(ctx, &args); + return 0; } SETUP("tc", "022_ipv6_from_netdev_ns_node_ip_mcast") @@ -501,19 +376,34 @@ int ipv6_from_netdev_ns_node_ip_setup_mcast(struct __ctx_buff *ctx) CHECK("tc", "022_ipv6_from_netdev_ns_node_ip_mcast") int ipv6_from_netdev_ns_node_ip_check_mcast(const struct __ctx_buff *ctx) { - struct test_args args = {0}; + test_init(); + + assert(__check_ret_code(ctx, CTX_ACT_OK)); + + /* Packet should not be modified */ + BUF_DECL(V6_NDP_NODE_NS_MCAST_LLOPT_PASS, v6_ndp_node_ns_mcast_llopt); + ASSERT_CTX_BUF_OFF("node_ns_mcast_pass", "Ether", ctx, + sizeof(__u32), + V6_NDP_NODE_NS_MCAST_LLOPT_PASS, + sizeof(BUF(V6_NDP_NODE_NS_MCAST_LLOPT_PASS))); + test_finish(); - __ipv6_from_netdev_ns_node_ip_check_mcast_args(&args, true); - return __ipv6_from_netdev_ns_check(ctx, &args); + return 0; } PKTGEN("tc", "022_ipv6_from_netdev_ns_node_ip_mcast_noopt") int ipv6_from_netdev_ns_node_ip_pktgen_mcast_noopt(struct __ctx_buff *ctx) { - struct test_args args = {0}; + struct pktgen builder; - __ipv6_from_netdev_ns_node_ip_pktgen_mcast_args(&args, false); - return __ipv6_from_netdev_ns_pktgen(ctx, &args); + pktgen__init(&builder, ctx); + + BUF_DECL(V6_NDP_NODE_NS_MCAST, v6_ndp_node_ns_mcast); + BUILDER_PUSH_BUF(builder, V6_NDP_NODE_NS_MCAST); + + pktgen__finish(&builder); + + return 0; } SETUP("tc", "022_ipv6_from_netdev_ns_node_ip_mcast_noopt") @@ -525,8 +415,17 @@ int ipv6_from_netdev_ns_node_ip_setup_mcast_noopt(struct __ctx_buff *ctx) CHECK("tc", "022_ipv6_from_netdev_ns_node_ip_mcast_noopt") int ipv6_from_netdev_ns_node_ip_check_mcast_noopt(const struct __ctx_buff *ctx) { - struct test_args args = {0}; + test_init(); + + assert(__check_ret_code(ctx, CTX_ACT_OK)); - __ipv6_from_netdev_ns_node_ip_check_mcast_args(&args, false); - return __ipv6_from_netdev_ns_check(ctx, &args); + /* Packet should not be modified */ + 
BUF_DECL(V6_NDP_NODE_NS_MCAST_PASS, v6_ndp_node_ns_mcast); + ASSERT_CTX_BUF_OFF("node_ns_mcast_noopt_pass", "Ether", ctx, + sizeof(__u32), + V6_NDP_NODE_NS_MCAST_PASS, + sizeof(BUF(V6_NDP_NODE_NS_MCAST_PASS))); + test_finish(); + + return 0; } diff --git a/bpf/tests/scapy.h b/bpf/tests/scapy.h index 7ee352de2d466..e462859a81532 100644 --- a/bpf/tests/scapy.h +++ b/bpf/tests/scapy.h @@ -40,15 +40,15 @@ */ #define ASSERT_CTX_BUF_OFF(NAME, FIRST_LAYER, CTX, OFF, BUF_NAME, LEN) \ do { \ - void *__data = (void *)(long)(CTX)->data; \ - void *__data_end = (void *)(long)(CTX)->data_end; \ - __data += OFF; \ + void *__DATA = (void *)(long)(CTX)->data; \ + void *__DATA_END = (void *)(long)(CTX)->data_end; \ + __DATA += OFF; \ bool ok = true; \ __u16 _len = LEN; \ \ - if (__data + (LEN) > __data_end) { \ + if (__DATA + (LEN) > __DATA_END) { \ ok = false; \ - _len = (__u16)(data_end - __data); \ + _len = (__u16)(__DATA_END - __DATA); \ test_log("CTX len (%d) - offset (%d) < LEN (%d)", \ _len + OFF, OFF, LEN); \ } \ @@ -57,7 +57,7 @@ test_log("Buffer '" #BUF_NAME "' of len (%d) < LEN" \ " (%d)", sizeof(BUF(BUF_NAME)), LEN); \ } \ - if (ok && memcmp(__data, &BUF(BUF_NAME), LEN) != 0) { \ + if (ok && memcmp(__DATA, &BUF(BUF_NAME), LEN) != 0) { \ ok = false; \ test_log("CTX and buffer '" #BUF_NAME \ "' content mismatch "); \ diff --git a/bpf/tests/scapy/README.md b/bpf/tests/scapy/README.md index 9392e5a64be6f..60bb79b96f146 100644 --- a/bpf/tests/scapy/README.md +++ b/bpf/tests/scapy/README.md @@ -90,10 +90,12 @@ Replace values with the constants defined in `pkt_defs.py` (e.g.MACs, IPs). Add any new value necessary in `pkt_defs.py`: ``` -l2_announce6_ns = Ether(dst=l2_announce6_ns_mmac, src=mac_one)/ \ - IPv6(src=v6_ext_node_one, dst=l2_announce6_ns_ma, hlim=255)/ \ - ICMPv6ND_NS(tgt=v6_svc_one)/ \ - ICMPv6NDOptSrcLLAddr(lladdr=mac_one) +l2_announce6_ns = ( + Ether(dst=l2_announce6_ns_mmac, src=mac_one) / + IPv6(src=v6_ext_node_one, dst=l2_announce6_ns_ma, hlim=255) / + ICMPv6ND_NS(tgt=v6_svc_one) / + ICMPv6NDOptSrcLLAddr(lladdr=mac_one) +) ``` Run the test and adjust the scapy packet until it passes. @@ -104,10 +106,12 @@ If the expected packet in the `_check` function is different than the injected, define the new packet. You can take as reference the injected packet. ``` -l2_announce6_na = Ether(dst=mac_one, src=mac_two)/ \ - IPv6(src=v6_svc_one, dst=v6_ext_node_one, hlim=255)/ \ - ICMPv6ND_NA(R=0, S=1, O=1, tgt=v6_svc_one)/ \ - ICMPv6NDOptDstLLAddr(lladdr=mac_two) +l2_announce6_na = ( + Ether(dst=mac_one, src=mac_two) / + IPv6(src=v6_svc_one, dst=v6_ext_node_one, hlim=255) / + ICMPv6ND_NA(R=0, S=1, O=1, tgt=v6_svc_one) / + ICMPv6NDOptDstLLAddr(lladdr=mac_two) +) ``` Add the `ASSERT_CTX_BUF_*()` after the current assertions but before diff --git a/bpf/tests/scapy/pkt_defs.py b/bpf/tests/scapy/pkt_defs.py index a9436df5735b8..32c02c6ee6a43 100644 --- a/bpf/tests/scapy/pkt_defs.py +++ b/bpf/tests/scapy/pkt_defs.py @@ -72,43 +72,118 @@ default_data = "Should not change!!" 
# Utility functions -def get_v6_ns_addr(v6_addr:str) -> str: - addr_bytes = in6_getnsma(inet_pton(socket.AF_INET6, v6_svc_one)) +def v6_get_ns_addr(v6_addr:str) -> str: + addr_bytes = in6_getnsma(inet_pton(socket.AF_INET6, v6_addr)) return inet_ntop(socket.AF_INET6, addr_bytes) -def v6_ns_mac(v6_addr:str) -> str: - addr_bytes = in6_getnsma(inet_pton(socket.AF_INET6, v6_svc_one)) +def v6_get_ns_mac(v6_addr:str) -> str: + addr_bytes = in6_getnsma(inet_pton(socket.AF_INET6, v6_addr)) return in6_getnsmac(addr_bytes) # Test packet/buffer definitions -## L2 announce (v4) +## IPv6 ndp from netdev +### Pod NS/NA +v6_ndp_pod_ns = ( + Ether(dst=mac_two, src=mac_one) / + IPv6(dst=v6_pod_two, src=v6_pod_one, hlim=255) / + ICMPv6ND_NS(tgt=v6_pod_three) +) + +v6_ndp_pod_ns_llopt = ( + v6_ndp_pod_ns / + ICMPv6NDOptSrcLLAddr(lladdr="01:01:01:01:01:01") +) + +v6_ndp_pod_na_llopt = ( + Ether(dst=mac_one, src=mac_two) / + IPv6(dst=v6_pod_one, src=v6_pod_three, hlim=255) / + ICMPv6ND_NA(R=0, S=1, O=1, tgt=v6_pod_three) / + ICMPv6NDOptDstLLAddr(lladdr=mac_two) +) + +v6_ndp_pod_ns_mmac = v6_get_ns_mac(v6_pod_three) +v6_ndp_pod_ns_ma = v6_get_ns_addr(v6_pod_three) +assert(v6_ndp_pod_ns_mmac == '33:33:ff:00:00:03') +assert(v6_ndp_pod_ns_ma == 'ff02::1:ff00:3') + +v6_ndp_pod_ns_mcast = ( + Ether(dst=v6_ndp_pod_ns_mmac, src=mac_one) / + IPv6(dst=v6_ndp_pod_ns_ma, src=v6_pod_one, hlim=255) / + ICMPv6ND_NS(tgt=v6_pod_three) +) + +v6_ndp_pod_ns_mcast_llopt = ( + v6_ndp_pod_ns_mcast / + ICMPv6NDOptSrcLLAddr(lladdr="01:01:01:01:01:01") +) + +### Node NS/NA +v6_ndp_node_ns = ( + Ether(dst=mac_two, src=mac_one) / + IPv6(dst=v6_pod_two, src=v6_pod_one, hlim=255) / + ICMPv6ND_NS(tgt=v6_node_one) +) + +v6_ndp_node_ns_llopt = ( + v6_ndp_node_ns / + ICMPv6NDOptSrcLLAddr(lladdr="01:01:01:01:01:01") +) + +v6_ndp_node_ns_mmac = v6_get_ns_mac(v6_node_one) +v6_ndp_node_ns_ma = v6_get_ns_addr(v6_node_one) +assert(v6_ndp_node_ns_mmac == '33:33:ff:00:00:01') +assert(v6_ndp_node_ns_ma == 'ff02::1:ff00:1') + +v6_ndp_node_ns_mcast = ( + Ether(dst=v6_ndp_node_ns_mmac, src=mac_one) / + IPv6(dst=v6_ndp_node_ns_ma, src=v6_pod_one, hlim=255) / + ICMPv6ND_NS(tgt=v6_node_one) +) + +v6_ndp_node_ns_mcast_llopt = ( + v6_ndp_node_ns_mcast / + ICMPv6NDOptSrcLLAddr(lladdr="01:01:01:01:01:01") +) -l2_announce_arp_req = Ether(dst=mac_bcast, src=mac_one)/ \ - ARP(op="who-has", psrc=v4_ext_one, pdst=v4_svc_one, \ - hwsrc=mac_one, hwdst=mac_bcast) -l2_announce_arp_reply = Ether(dst=mac_one, src=mac_two)/ \ - ARP(op="is-at", psrc=v4_svc_one, pdst=v4_ext_one, \ - hwsrc=mac_two, hwdst=mac_one) +## L2 announce (v4) +l2_announce_arp_req = ( + Ether(dst=mac_bcast, src=mac_one) / + ARP(op="who-has", psrc=v4_ext_one, pdst=v4_svc_one, \ + hwsrc=mac_one, hwdst=mac_bcast) +) + +l2_announce_arp_reply = ( + Ether(dst=mac_one, src=mac_two) / + ARP(op="is-at", psrc=v4_svc_one, pdst=v4_ext_one, \ + hwsrc=mac_two, hwdst=mac_one) +) ## L2 announce (v6) ### Calculate the IPv6 NS solicitation address -l2_announce6_ns_mmac = v6_ns_mac(v6_svc_one) -l2_announce6_ns_ma = get_v6_ns_addr(v6_svc_one) +l2_announce6_ns_mmac = v6_get_ns_mac(v6_svc_one) +l2_announce6_ns_ma = v6_get_ns_addr(v6_svc_one) assert(l2_announce6_ns_mmac == '33:33:ff:00:00:01') assert(l2_announce6_ns_ma == 'ff02::1:ff00:1') -l2_announce6_ns = Ether(dst=l2_announce6_ns_mmac, src=mac_one)/ \ - IPv6(src=v6_ext_node_one, dst=l2_announce6_ns_ma, hlim=255)/ \ - ICMPv6ND_NS(tgt=v6_svc_one)/ \ - ICMPv6NDOptSrcLLAddr(lladdr=mac_one) -l2_announce6_targeted_ns = \ - Ether(dst=mac_two, src=mac_one) / \ - 
IPv6(src=v6_ext_node_one, dst=v6_svc_one, hlim=255) / \ - ICMPv6ND_NS(tgt=v6_svc_one) / \ - ICMPv6NDOptSrcLLAddr(lladdr=mac_one) -l2_announce6_na = Ether(dst=mac_one, src=mac_two)/ \ - IPv6(src=v6_svc_one, dst=v6_ext_node_one, hlim=255)/ \ - ICMPv6ND_NA(R=0, S=1, O=1, tgt=v6_svc_one)/ \ - ICMPv6NDOptDstLLAddr(lladdr=mac_two) +l2_announce6_ns = ( + Ether(dst=l2_announce6_ns_mmac, src=mac_one) / + IPv6(src=v6_ext_node_one, dst=l2_announce6_ns_ma, hlim=255) / + ICMPv6ND_NS(tgt=v6_svc_one) / + ICMPv6NDOptSrcLLAddr(lladdr=mac_one) +) + +l2_announce6_targeted_ns = ( + Ether(dst=mac_two, src=mac_one) / + IPv6(src=v6_ext_node_one, dst=v6_svc_one, hlim=255) / + ICMPv6ND_NS(tgt=v6_svc_one) / + ICMPv6NDOptSrcLLAddr(lladdr=mac_one) +) + +l2_announce6_na = ( + Ether(dst=mac_one, src=mac_two) / + IPv6(src=v6_svc_one, dst=v6_ext_node_one, hlim=255) / + ICMPv6ND_NA(R=0, S=1, O=1, tgt=v6_svc_one) / + ICMPv6NDOptDstLLAddr(lladdr=mac_two) +) diff --git a/bpf/tests/tc_lb_no_backend_nonroutable.c b/bpf/tests/tc_lb_no_backend_nonroutable.c new file mode 100644 index 0000000000000..cb4309bb92946 --- /dev/null +++ b/bpf/tests/tc_lb_no_backend_nonroutable.c @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright Authors of Cilium */ + +#include +#include "common.h" + +/* Enable CT debug output */ +#undef QUIET_CT + +#include "pktgen.h" + +/* Enable code paths under test*/ +#define ENABLE_IPV4 + +/* Skip ingress policy checks */ +#define USE_BPF_PROG_FOR_INGRESS_POLICY + +#include + +/* Set the LXC source address to be the address of pod one */ +ASSIGN_CONFIG(union v4addr, endpoint_ipv4, { .be32 = v4_pod_one}) +ASSIGN_CONFIG(union v4addr, service_loopback_ipv4, { .be32 = v4_svc_loopback }) +ASSIGN_CONFIG(bool, enable_no_service_endpoints_routable, false) + +#include "lib/endpoint.h" +#include "lib/ipcache.h" +#include "lib/lb.h" +#include "lib/policy.h" + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(key_size, sizeof(__u32)); + __uint(max_entries, 2); + __array(values, int()); +} entry_call_map __section(".maps") = { + .values = { + [0] = &cil_from_container, + [1] = &cil_to_container, + }, +}; + +/* Setup for this test: + * +-------ClusterIP--------+ +----------Pod 1---------+ + * | v4_svc_one:tcp_svc_one | -> | v4_pod_one:tcp_svc_one | + * +------------------------+ +------------------------+ + * ^ | + * \---------------------------/ + */ + +static __always_inline int build_packet(struct __ctx_buff *ctx, + __be16 sport) +{ + struct pktgen builder; + volatile const __u8 *src = mac_one; + volatile const __u8 *dst = mac_two; + struct tcphdr *l4; + void *data; + + /* Init packet builder */ + pktgen__init(&builder, ctx); + + l4 = pktgen__push_ipv4_tcp_packet(&builder, + (__u8 *)src, (__u8 *)dst, + v4_pod_one, v4_svc_one, + sport, tcp_svc_one); + if (!l4) + return TEST_ERROR; + + data = pktgen__push_data(&builder, default_data, sizeof(default_data)); + + if (!data) + return TEST_ERROR; + + /* Calc lengths, set protocol fields and calc checksums */ + pktgen__finish(&builder); + + return 0; +} + +/* Test that a packet for a SVC without any backend does not get dropped (enable_no_endpoints_routable=false). 
*/ +SETUP("tc", "tc_lb_no_backend_nonroutable") +int tc_lb_no_backend_nonroutable_setup(struct __ctx_buff *ctx) +{ + int ret; + + ret = build_packet(ctx, tcp_src_two); + if (ret) + return ret; + + lb_v4_add_service_with_flags(v4_svc_one, tcp_svc_one, IPPROTO_TCP, 0, 1, + SVC_FLAG_LOADBALANCER, 0); + + /* avoid policy drop */ + policy_add_egress_allow_all_entry(); + + /* Jump into the entrypoint */ + tail_call_static(ctx, entry_call_map, 0); + /* Fail if we didn't jump */ + return TEST_ERROR; +} + +CHECK("tc", "tc_lb_no_backend_nonroutable") +int tc_lb_no_backend_nonroutable_check(const struct __ctx_buff *ctx) +{ + __u32 expected_status = TC_ACT_OK; + __u32 *status_code; + void *data_end; + void *data; + + test_init(); + + data = (void *)(long)ctx->data; + data_end = (void *)(long)ctx->data_end; + + if (data + sizeof(__u32) > data_end) + test_fatal("status code out of bounds"); + + status_code = data; + + if (*status_code != expected_status) + test_fatal("status code is %lu, expected %lu", *status_code, expected_status); + + test_finish(); +} + +/* Test that a packet for a SVC without any backend with eTP=Local gets dropped. */ +SETUP("tc", "tc_lb_no_backend_nonroutable_etp") +int tc_lb_no_backend_nonroutable_etp_setup(struct __ctx_buff *ctx) +{ + int ret; + + ret = build_packet(ctx, tcp_src_two); + if (ret) + return ret; + + lb_v4_add_service_with_flags(v4_svc_one, tcp_svc_one, IPPROTO_TCP, 0, 1, + SVC_FLAG_LOADBALANCER | SVC_FLAG_EXT_LOCAL_SCOPE, 0); + + /* avoid policy drop */ + policy_add_egress_allow_all_entry(); + + /* Jump into the entrypoint */ + tail_call_static(ctx, entry_call_map, 0); + /* Fail if we didn't jump */ + return TEST_ERROR; +} + +CHECK("tc", "tc_lb_no_backend_nonroutable_etp") +int tc_lb_no_backend_nonroutable_etp_check(const struct __ctx_buff *ctx) +{ + __u32 expected_status = TC_ACT_SHOT; + __u32 *status_code; + void *data_end; + void *data; + + test_init(); + + data = (void *)(long)ctx->data; + data_end = (void *)(long)ctx->data_end; + + if (data + sizeof(__u32) > data_end) + test_fatal("status code out of bounds"); + + status_code = data; + + if (*status_code != expected_status) + test_fatal("status code is %lu, expected %lu", *status_code, expected_status); + + test_finish(); +} diff --git a/bpf/tests/tc_lxc_lb4_no_backend.c b/bpf/tests/tc_lxc_lb4_no_backend.c index 2a039bcd7a8ba..5e474ed02acf3 100644 --- a/bpf/tests/tc_lxc_lb4_no_backend.c +++ b/bpf/tests/tc_lxc_lb4_no_backend.c @@ -25,6 +25,8 @@ static volatile const __u8 lb_mac[ETH_ALEN] = { 0xce, 0x72, 0xa7, 0x03, 0x88, 0x #include +ASSIGN_CONFIG(bool, enable_no_service_endpoints_routable, true) + #include "lib/ipcache.h" #include "lib/lb.h" diff --git a/bpf/tests/tc_lxc_lb6_no_backend.c b/bpf/tests/tc_lxc_lb6_no_backend.c index 90dfe88eb8303..a00ba1c260be4 100644 --- a/bpf/tests/tc_lxc_lb6_no_backend.c +++ b/bpf/tests/tc_lxc_lb6_no_backend.c @@ -25,6 +25,8 @@ static volatile const __u8 lb_mac[ETH_ALEN] = { 0xce, 0x72, 0xa7, 0x03, 0x88, 0x #include +ASSIGN_CONFIG(bool, enable_no_service_endpoints_routable, true) + #include "lib/ipcache.h" #include "lib/lb.h" diff --git a/bpf/tests/tc_nodeport_lb4_no_backend.c b/bpf/tests/tc_nodeport_lb4_no_backend.c index 16008ac307076..1db122fa4a7fa 100644 --- a/bpf/tests/tc_nodeport_lb4_no_backend.c +++ b/bpf/tests/tc_nodeport_lb4_no_backend.c @@ -27,6 +27,7 @@ static volatile const __u8 lb_mac[ETH_ALEN] = { 0xce, 0x72, 0xa7, 0x03, 0x88, 0x #include ASSIGN_CONFIG(union v4addr, nat_ipv4_masquerade, { .be32 = FRONTEND_IP}) +ASSIGN_CONFIG(bool, 
enable_no_service_endpoints_routable, true) #include "lib/ipcache.h" #include "lib/lb.h" diff --git a/bpf/tests/tc_nodeport_lb4_wildcard_drop.c b/bpf/tests/tc_nodeport_lb4_wildcard_drop.c index a70dac1cb48bf..1164687020745 100644 --- a/bpf/tests/tc_nodeport_lb4_wildcard_drop.c +++ b/bpf/tests/tc_nodeport_lb4_wildcard_drop.c @@ -27,6 +27,8 @@ static volatile const __u8 lb_mac[ETH_ALEN] = { 0xce, 0x72, 0xa7, 0x03, 0x88, 0x #include +ASSIGN_CONFIG(bool, enable_no_service_endpoints_routable, true) + #include "lib/ipcache.h" #include "lib/lb.h" diff --git a/bpf/tests/tc_nodeport_lb6_no_backend.c b/bpf/tests/tc_nodeport_lb6_no_backend.c index 370fef0d3b7b5..8b74672421f3d 100644 --- a/bpf/tests/tc_nodeport_lb6_no_backend.c +++ b/bpf/tests/tc_nodeport_lb6_no_backend.c @@ -26,6 +26,8 @@ static volatile const __u8 lb_mac[ETH_ALEN] = { 0xce, 0x72, 0xa7, 0x03, 0x88, 0x #include +ASSIGN_CONFIG(bool, enable_no_service_endpoints_routable, true) + #include "lib/ipcache.h" #include "lib/lb.h" diff --git a/bpf/tests/tc_nodeport_lb6_wildcard_drop.c b/bpf/tests/tc_nodeport_lb6_wildcard_drop.c index 90c69206d222b..c0d34364ad337 100644 --- a/bpf/tests/tc_nodeport_lb6_wildcard_drop.c +++ b/bpf/tests/tc_nodeport_lb6_wildcard_drop.c @@ -22,6 +22,8 @@ static volatile const __u8 lb_mac[ETH_ALEN] = { 0xce, 0x72, 0xa7, 0x03, 0x88, 0x #include +ASSIGN_CONFIG(bool, enable_no_service_endpoints_routable, true) + #include "lib/ipcache.h" #include "lib/lb.h" diff --git a/bpf/tests/tc_nodeport_test.c b/bpf/tests/tc_nodeport_test.c index 3a968d0e325f8..7dd16f0060d35 100644 --- a/bpf/tests/tc_nodeport_test.c +++ b/bpf/tests/tc_nodeport_test.c @@ -26,6 +26,7 @@ ASSIGN_CONFIG(union v6addr, service_loopback_ipv6, { .addr = v6_svc_loopback }) #define POD_IPV6 v6_pod_one #define SERVICE_IPV6 v6_node_three +ASSIGN_CONFIG(bool, enable_no_service_endpoints_routable, true) #include "lib/endpoint.h" #include "lib/ipcache.h" diff --git a/bpf/tests/xdp_nodeport_lb4_test.c b/bpf/tests/xdp_nodeport_lb4_test.c index c693e2f1f2e20..1f00ffdf51466 100644 --- a/bpf/tests/xdp_nodeport_lb4_test.c +++ b/bpf/tests/xdp_nodeport_lb4_test.c @@ -24,6 +24,8 @@ long mock_fib_lookup(__maybe_unused void *ctx, struct bpf_fib_lookup *params, #include "bpf_xdp.c" #include "lib/nodeport.h" +ASSIGN_CONFIG(bool, enable_no_service_endpoints_routable, true) + #include "lib/lb.h" struct { diff --git a/bugtool/cmd/configuration.go b/bugtool/cmd/configuration.go index 5761d70817a7b..5c147c6dbcc2d 100644 --- a/bugtool/cmd/configuration.go +++ b/bugtool/cmd/configuration.go @@ -130,6 +130,7 @@ var bpfMapsPath = []string{ "tc/globals/cilium_snat_v4_external", "tc/globals/cilium_snat_v6_external", "tc/globals/cilium_vtep_map", + "tc/globals/cilium_vtep_policy_map", "tc/globals/cilium_l2_responder_v4", "tc/globals/cilium_ratelimit", "tc/globals/cilium_ratelimit_metrics", diff --git a/cilium-cli/Dockerfile b/cilium-cli/Dockerfile index 83f82a3ac9316..edc55cf92831b 100644 --- a/cilium-cli/Dockerfile +++ b/cilium-cli/Dockerfile @@ -4,7 +4,7 @@ # SPDX-License-Identifier: Apache-2.0 ARG BASE_IMAGE=gcr.io/distroless/static:latest@sha256:f2ff10a709b0fd153997059b698ada702e4870745b6077eff03a5f4850ca91b6 -ARG GOLANG_IMAGE=docker.io/library/golang:1.25.1@sha256:a5e935dbd8bc3a5ea24388e376388c9a69b40628b6788a81658a801abbec8f2e +ARG GOLANG_IMAGE=docker.io/library/golang:1.25.1@sha256:8305f5fa8ea63c7b5bc85bd223ccc62941f852318ebfbd22f53bbd0b358c07e1 # BUILDPLATFORM is an automatic platform ARG enabled by Docker BuildKit. 
# Represents the plataform where the build is happening, do not mix with # TARGETARCH diff --git a/cilium-cli/clustermesh/clustermesh.go b/cilium-cli/clustermesh/clustermesh.go index 2356c6222a940..854a36942123f 100644 --- a/cilium-cli/clustermesh/clustermesh.go +++ b/cilium-cli/clustermesh/clustermesh.go @@ -629,12 +629,11 @@ type ClusterStats struct { } type ConnectivityStatus struct { - GlobalServices StatisticalStatus `json:"global_services,omitempty"` - Connected StatisticalStatus `json:"connected,omitempty"` - Clusters map[string]*ClusterStats `json:"clusters,omitempty"` - Total int64 `json:"total,omitempty"` - NotReady int64 `json:"not_ready,omitempty"` - Errors status.ErrorCountMapMap `json:"errors,omitempty"` + Connected StatisticalStatus `json:"connected,omitempty"` + Clusters map[string]*ClusterStats `json:"clusters,omitempty"` + Total int64 `json:"total,omitempty"` + NotReady int64 `json:"not_ready,omitempty"` + Errors status.ErrorCountMapMap `json:"errors,omitempty"` } func (c *ConnectivityStatus) addError(pod, cluster string, err error) { @@ -682,15 +681,6 @@ func remoteClusterStatusToError(status *models.RemoteCluster) error { } func (c *ConnectivityStatus) parseAgentStatus(name string, expected []string, s *status.ClusterMeshAgentConnectivityStatus) { - if c.GlobalServices.Min < 0 || c.GlobalServices.Min > s.GlobalServices { - c.GlobalServices.Min = s.GlobalServices - } - - if c.GlobalServices.Max < s.GlobalServices { - c.GlobalServices.Max = s.GlobalServices - } - - c.GlobalServices.Avg += float64(s.GlobalServices) c.Total++ ready := int64(0) @@ -797,10 +787,9 @@ func (k *K8sClusterMesh) determineStatusConnectivity(ctx context.Context, secret collector func(ctx context.Context, ciliumPod string) (*status.ClusterMeshAgentConnectivityStatus, error), ) (*ConnectivityStatus, error) { stats := &ConnectivityStatus{ - GlobalServices: StatisticalStatus{Min: -1}, - Connected: StatisticalStatus{Min: -1}, - Errors: status.ErrorCountMapMap{}, - Clusters: map[string]*ClusterStats{}, + Connected: StatisticalStatus{Min: -1}, + Errors: status.ErrorCountMapMap{}, + Clusters: map[string]*ClusterStats{}, } // Retrieve the remote clusters to connect to from the clustermesh configuration, @@ -840,7 +829,6 @@ func (k *K8sClusterMesh) determineStatusConnectivity(ctx context.Context, secret } if len(pods.Items) > 0 { - stats.GlobalServices.Avg /= float64(len(pods.Items)) stats.Connected.Avg /= float64(len(pods.Items)) } @@ -970,12 +958,6 @@ func (k *K8sClusterMesh) outputConnectivityStatus(agents, kvstoremesh *Connectiv k.Log("🔌 No cluster connected") } - k.Log("") - k.Log("🔀 Global services: [ min:%d / avg:%.1f / max:%d ]", - agents.GlobalServices.Min, - agents.GlobalServices.Avg, - agents.GlobalServices.Max) - k.Log("") errCount := len(agents.Errors) if kvstoremesh != nil { diff --git a/cilium-cli/connectivity/check/features.go b/cilium-cli/connectivity/check/features.go index 0d9774986e2c0..a8e8ffe0d3522 100644 --- a/cilium-cli/connectivity/check/features.go +++ b/cilium-cli/connectivity/check/features.go @@ -172,10 +172,15 @@ func (ct *ConnectivityTest) extractFeaturesFromCiliumStatus(ctx context.Context, result[features.KPRSocketLB] = features.Status{Enabled: f.SocketLB.Enabled} result[features.KPRSocketLBHostnsOnly] = features.Status{Enabled: f.BpfSocketLBHostnsOnly} } + acceleration := strings.ToLower(f.NodePort.Acceleration) + result[features.KPRNodePortAcceleration] = features.Status{ + Enabled: mode == "true" && acceleration != "disabled", + Mode: acceleration, + } } } 
result[features.KPR] = features.Status{ - Enabled: mode == "true" || mode == "strict", + Enabled: mode == "true", Mode: mode, } diff --git a/cilium-cli/connectivity/check/metrics.go b/cilium-cli/connectivity/check/metrics.go index f863a77618a62..522947edcd362 100644 --- a/cilium-cli/connectivity/check/metrics.go +++ b/cilium-cli/connectivity/check/metrics.go @@ -11,6 +11,7 @@ import ( dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" + "github.com/prometheus/common/model" "github.com/cilium/cilium/pkg/k8s/portforward" ) @@ -103,7 +104,7 @@ func (a *Action) collectMetricsForPod(pod Pod, port string) (promMetricsFamily, // parseMetrics transforms the response from the call to prometheus metric endpoint // into a dto model MetricFamily. func parseMetrics(reader io.Reader) (promMetricsFamily, error) { - var parser expfmt.TextParser + var parser = expfmt.NewTextParser(model.LegacyValidation) mf, err := parser.TextToMetricFamilies(reader) if err != nil { return nil, err diff --git a/cilium-cli/features/status.go b/cilium-cli/features/status.go index 0d8b4a69dce51..aa1bb6683b202 100644 --- a/cilium-cli/features/status.go +++ b/cilium-cli/features/status.go @@ -21,7 +21,7 @@ import ( ) var ( - cmdMetricsList = []string{"cilium", "metrics", "list", "-p", "cilium_feature", "-o", "json"} + subCmdMetricsList = []string{"metrics", "list", "-p", "cilium_.*feature", "-o", "json"} ) // perDeployNodeMetrics maps a deployment name to their node metrics @@ -247,7 +247,8 @@ func (s *Feature) fetchStatusConcurrently(ctx context.Context, pods []corev1.Pod } func (s *Feature) fetchCiliumFeatureMetricsFromPod(ctx context.Context, pod corev1.Pod) ([]*models.Metric, error) { - output, err := s.client.ExecInPod(ctx, pod.Namespace, pod.Name, defaults.AgentContainerName, cmdMetricsList) + agentCmd := append([]string{"cilium"}, subCmdMetricsList...) + output, err := s.client.ExecInPod(ctx, pod.Namespace, pod.Name, defaults.AgentContainerName, agentCmd) if err != nil { return nil, fmt.Errorf("failed to features status from %s: %w", pod.Name, err) } @@ -266,8 +267,8 @@ func (s *Feature) fetchCiliumOperatorFeatureMetricsFromPod(ctx context.Context, return nil, fmt.Errorf("operator command not found in Cilium Operator pod. Use --operator-container-command to define it") } } - cmd := []string{operatorCmd, "metrics", "list", "-p", "cilium_operator_feature", "-o", "json"} - output, err := s.client.ExecInPod(ctx, pod.Namespace, pod.Name, defaults.OperatorContainerName, cmd) + operatorCmds := append([]string{operatorCmd}, subCmdMetricsList...) + output, err := s.client.ExecInPod(ctx, pod.Namespace, pod.Name, defaults.OperatorContainerName, operatorCmds) if err != nil && !strings.Contains(err.Error(), "level=debug") { return []*models.Metric{}, fmt.Errorf("failed to get features status from %s: %w", pod.Name, err) } diff --git a/cilium-cli/status/k8s.go b/cilium-cli/status/k8s.go index 462ffb5f4587a..c10da53371f6e 100644 --- a/cilium-cli/status/k8s.go +++ b/cilium-cli/status/k8s.go @@ -92,9 +92,8 @@ func NewK8sStatusCollector(client k8sImplementation, params K8sStatusParameters) } type ClusterMeshAgentConnectivityStatus struct { - GlobalServices int64 - Clusters map[string]*models.RemoteCluster - Errors ErrorCountMap + Clusters map[string]*models.RemoteCluster + Errors ErrorCountMap } // ErrClusterMeshStatusNotAvailable is a sentinel. 
@@ -117,7 +116,6 @@ func (k *K8sStatusCollector) ClusterMeshConnectivity(ctx context.Context, cilium return nil, ErrClusterMeshStatusNotAvailable } - c.GlobalServices = status.ClusterMesh.NumGlobalServices for _, cluster := range status.ClusterMesh.Clusters { c.Clusters[cluster.Name] = cluster } diff --git a/cilium-cli/sysdump/constants.go b/cilium-cli/sysdump/constants.go index e668612734479..39bcd406ee48a 100644 --- a/cilium-cli/sysdump/constants.go +++ b/cilium-cli/sysdump/constants.go @@ -96,6 +96,8 @@ const ( kubernetesEventsTableFileName = "k8s-events-.html" kubernetesLeasesFileName = "k8s-leases-.yaml" kubernetesMetricsFileName = "k8s-metrics-.yaml" + kubernetesTopNodesFileName = "k8s-node-memory-cpu-usage-.txt" + kubernetesTopPodsFileName = "k8s-pod-memory-cpu-usage-.txt" kubernetesNamespacesFileName = "k8s-namespaces-.yaml" kubernetesNetworkPoliciesFileName = "k8s-networkpolicies-.yaml" kubernetesNodesFileName = "k8s-nodes-.yaml" diff --git a/cilium-cli/sysdump/sysdump.go b/cilium-cli/sysdump/sysdump.go index 25b9c74c7aad5..56bd951febd15 100644 --- a/cilium-cli/sysdump/sysdump.go +++ b/cilium-cli/sysdump/sysdump.go @@ -9,6 +9,7 @@ import ( "bytes" "compress/gzip" "context" + "encoding/json" "errors" "fmt" "io" @@ -19,6 +20,7 @@ import ( "strconv" "strings" "sync" + "text/tabwriter" "time" "github.com/cilium/workerpool" @@ -32,6 +34,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/kubectl/pkg/util/podutils" + metricsapi "k8s.io/metrics/pkg/apis/metrics/v1beta1" "github.com/cilium/cilium/cilium-cli/defaults" "github.com/cilium/cilium/cilium-cli/k8s" @@ -626,6 +629,52 @@ func (c *Collector) Run() error { return nil }, }, + { + Description: "Collecting Kubernetes nodes memory/cpu usage", + Quick: true, + Task: func(ctx context.Context) error { + // Use the raw client to get the table format directly from the metrics API + // This gives us the same output as kubectl top nodes + result, err := c.Client.GetRaw(ctx, "/apis/metrics.k8s.io/v1beta1/nodes") + if err != nil { + return fmt.Errorf("failed to collect Kubernetes nodes memory/cpu usage: %w", err) + } + + // Parse the JSON response and format as table + output, err := c.formatNodeMetricsAsTable(result) + if err != nil { + return fmt.Errorf("failed to format node memory/cpu usage metrics: %w", err) + } + + if err := c.WriteString(kubernetesTopNodesFileName, output); err != nil { + return fmt.Errorf("failed to collect Kubernetes nodes memory/cpu usage: %w", err) + } + return nil + }, + }, + { + Description: "Collecting Kubernetes pods memory/cpu usage", + Quick: true, + Task: func(ctx context.Context) error { + // Use the raw client to get the table format directly from the metrics API + // This gives us the same output as kubectl top pods + result, err := c.Client.GetRaw(ctx, "/apis/metrics.k8s.io/v1beta1/pods") + if err != nil { + return fmt.Errorf("failed to collect Kubernetes pods memory/cpu usage: %w", err) + } + + // Parse the JSON response and format as table + output, err := c.formatPodMetricsAsTable(result) + if err != nil { + return fmt.Errorf("failed to format pod memory/cpu usage metrics: %w", err) + } + + if err := c.WriteString(kubernetesTopPodsFileName, output); err != nil { + return fmt.Errorf("failed to collect Kubernetes pods memory/cpu usage: %w", err) + } + return nil + }, + }, { Description: "Collecting crashed test pod logs", Quick: false, @@ -3294,6 +3343,115 @@ func InitSysdumpFlags(cmd *cobra.Command, options *Options, optionPrefix string, 
hooks.AddSysdumpFlags(cmd.Flags()) } +// formatNodeMetricsAsTable formats the raw node metrics JSON into a table format like kubectl top nodes +func (c *Collector) formatNodeMetricsAsTable(rawMetrics string) (string, error) { + // Parse the metrics JSON + var nodeMetrics metricsapi.NodeMetricsList + if err := json.Unmarshal([]byte(rawMetrics), &nodeMetrics); err != nil { + return "", fmt.Errorf("failed to parse metrics JSON: %w", err) + } + + // Get node information to calculate percentages + nodes, err := c.Client.ListNodes(context.Background(), metav1.ListOptions{}) + if err != nil { + return "", fmt.Errorf("failed to get nodes: %w", err) + } + + // Create a map of node names to their capacity + nodeCapacities := make(map[string]corev1.ResourceList) + for _, node := range nodes.Items { + nodeCapacities[node.Name] = node.Status.Capacity + } + + var sb strings.Builder + tw := tabwriter.NewWriter(&sb, 0, 0, 2, ' ', 0) + fmt.Fprintln(tw, "NAME\tCPU(cores)\tCPU(%)\tMEMORY(bytes)\tMEMORY(%)") + + for _, metric := range nodeMetrics.Items { + name := metric.Name + + // Get current usage + cpuUsage := metric.Usage[corev1.ResourceCPU] + memUsage := metric.Usage[corev1.ResourceMemory] + + cpuMillis := cpuUsage.MilliValue() + memBytes := memUsage.Value() + memMi := float64(memBytes) / (1024 * 1024) + + // Calculate percentages if we have capacity info + cpuPercent := "" + memPercent := "" + + if capacity, exists := nodeCapacities[metric.Name]; exists { + if cpuCap := capacity[corev1.ResourceCPU]; !cpuCap.IsZero() { + cpuCapMillis := cpuCap.MilliValue() + cpuPct := float64(cpuMillis) / float64(cpuCapMillis) * 100 + cpuPercent = fmt.Sprintf("%.0f%%", cpuPct) + } + + if memCap := capacity[corev1.ResourceMemory]; !memCap.IsZero() { + memCapBytes := memCap.Value() + memPct := float64(memBytes) / float64(memCapBytes) * 100 + memPercent = fmt.Sprintf("%.0f%%", memPct) + } + } + + fmt.Fprintf(tw, "%s\t%dm\t%s\t%dMi\t%s\n", + name, + cpuMillis, + cpuPercent, + int64(memMi), + memPercent) + } + + tw.Flush() + + return sb.String(), nil +} + +// formatPodMetricsAsTable formats the raw pod metrics JSON into a table format like kubectl top pods +func (c *Collector) formatPodMetricsAsTable(rawMetrics string) (string, error) { + // Parse the metrics JSON + var podMetrics metricsapi.PodMetricsList + if err := json.Unmarshal([]byte(rawMetrics), &podMetrics); err != nil { + return "", fmt.Errorf("failed to parse pod metrics JSON: %w", err) + } + + var sb strings.Builder + tw := tabwriter.NewWriter(&sb, 0, 0, 2, ' ', 0) + fmt.Fprintln(tw, "NAMESPACE\tNAME\tCPU(cores)\tMEMORY(bytes)") + + for _, metric := range podMetrics.Items { + namespace := metric.Namespace + name := metric.Name + + // Sum CPU and memory usage across all containers in the pod + var totalCPUMillis int64 + var totalMemBytes int64 + + for _, container := range metric.Containers { + if cpuUsage, exists := container.Usage[corev1.ResourceCPU]; exists { + totalCPUMillis += cpuUsage.MilliValue() + } + if memUsage, exists := container.Usage[corev1.ResourceMemory]; exists { + totalMemBytes += memUsage.Value() + } + } + + memMi := float64(totalMemBytes) / (1024 * 1024) + + fmt.Fprintf(tw, "%s\t%s\t%dm\t%dMi\n", + namespace, + name, + totalCPUMillis, + int64(memMi)) + } + + tw.Flush() + + return sb.String(), nil +} + + // Hooks to extend cilium-cli with additional sysdump tasks and related flags.
type Hooks interface { AddSysdumpFlags(flags *pflag.FlagSet) diff --git a/cilium-dbg/cmd/bpf_vtep_policy.go b/cilium-dbg/cmd/bpf_vtep_policy.go new file mode 100644 index 0000000000000..b583461bc68d0 --- /dev/null +++ b/cilium-dbg/cmd/bpf_vtep_policy.go @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package cmd + +import ( + "github.com/spf13/cobra" +) + +// BPFVtepPolicyCmd represents the bpf command +var BPFVtepPolicyCmd = &cobra.Command{ + Use: "vtep-policy", + Short: "Manage the VTEP Policy mappings", +} + +func init() { + BPFCmd.AddCommand(BPFVtepPolicyCmd) +} diff --git a/cilium-dbg/cmd/bpf_vtep_policy_delete.go b/cilium-dbg/cmd/bpf_vtep_policy_delete.go new file mode 100644 index 0000000000000..306dc2e3993b2 --- /dev/null +++ b/cilium-dbg/cmd/bpf_vtep_policy_delete.go @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package cmd + +import ( + "net/netip" + + "github.com/spf13/cobra" + + "github.com/cilium/cilium/pkg/common" + "github.com/cilium/cilium/pkg/maps/vtep_policy" +) + +const ( + vtepPolicyDelUsage = "Delete vtep entries using vtep CIDR.\n" +) + +var bpfVtepPolicyDeleteCmd = &cobra.Command{ + Args: cobra.ExactArgs(2), + Use: "delete", + Short: "Delete VTEP Policy entries", + Long: vtepPolicyDelUsage, + Run: func(cmd *cobra.Command, args []string) { + common.RequireRootPrivilege("cilium bpf vtep-policy delete ") + + vtep, err := vtep_policy.OpenPinnedVtepPolicyMap(log) + if err != nil { + Fatalf("Unable to open map: %s", err) + } + + src_ip, err := netip.ParseAddr(args[0]) + if err != nil { + Fatalf("Unable to parse IP '%s'", args[0]) + } + + dst_cidr, err := netip.ParsePrefix(args[1]) + if err != nil { + Fatalf("error parsing cidr %s: %s", args[1], err) + } + + if err := vtep.RemoveVtepPolicyMapping(src_ip, dst_cidr); err != nil { + Fatalf("error deleting contents of map: %s\n", err) + } + }, +} + +func init() { + BPFVtepPolicyCmd.AddCommand(bpfVtepPolicyDeleteCmd) +} diff --git a/cilium-dbg/cmd/bpf_vtep_policy_list.go b/cilium-dbg/cmd/bpf_vtep_policy_list.go new file mode 100644 index 0000000000000..d3d1dd8b4d734 --- /dev/null +++ b/cilium-dbg/cmd/bpf_vtep_policy_list.go @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package cmd + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" + + "github.com/cilium/cilium/pkg/command" + "github.com/cilium/cilium/pkg/common" + "github.com/cilium/cilium/pkg/maps/vtep_policy" +) + +const ( + vtepPolicyCidrTitle = "SourceIP DestinationCIDR" + vtepPolicyTitle = "VTEP IP/MAC" +) + +var ( + vtepPolicyListUsage = "List VTEP CIDR and their corresponding VTEP MAC/IP.\n" +) + +var bpfVtepPolicyListCmd = &cobra.Command{ + Use: "list", + Aliases: []string{"ls"}, + Short: "List VTEP Policy entries", + Long: vtepPolicyListUsage, + Run: func(cmd *cobra.Command, args []string) { + common.RequireRootPrivilege("cilium bpf vtep-policy list") + + vtep, err := vtep_policy.OpenPinnedVtepPolicyMap(log) + if err != nil { + Fatalf("Unable to open map: %s", err) + } + + bpfVtepList := make(map[string][]string) + parse := func(key *vtep_policy.VtepPolicyKey, val *vtep_policy.VtepPolicyVal) { + bpfVtepList[key.String()] = append(bpfVtepList[key.String()], val.VtepIp.String()) + bpfVtepList[key.String()] = append(bpfVtepList[key.String()], val.Mac.String()) + } + + if err := vtep.IterateWithCallback(parse); err != nil { + Fatalf("Error dumping contents of vtep policy map: %s\n", err) + } + + if command.OutputOption() { + if err
:= command.PrintOutput(bpfVtepList); err != nil { + fmt.Fprintf(os.Stderr, "error getting output of map in %s: %s\n", command.OutputOptionString(), err) + os.Exit(1) + } + return + } + + if len(bpfVtepList) == 0 { + fmt.Fprintf(os.Stderr, "No entries found.\n") + } else { + TablePrinter(vtepPolicyCidrTitle, vtepPolicyTitle, bpfVtepList) + } + }, +} + +func init() { + BPFVtepPolicyCmd.AddCommand(bpfVtepPolicyListCmd) + command.AddOutputOption(bpfVtepPolicyListCmd) +} diff --git a/cilium-dbg/cmd/bpf_vtep_policy_update.go b/cilium-dbg/cmd/bpf_vtep_policy_update.go new file mode 100644 index 0000000000000..f491ae5e310df --- /dev/null +++ b/cilium-dbg/cmd/bpf_vtep_policy_update.go @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package cmd + +import ( + "fmt" + "net/netip" + "os" + + "github.com/spf13/cobra" + + "github.com/cilium/cilium/pkg/common" + "github.com/cilium/cilium/pkg/mac" + "github.com/cilium/cilium/pkg/maps/vtep_policy" +) + +const ( + vtepPolUpdateUsage = "Create/Update vtep entry.\n" +) + +var bpfVtepPolicyUpdateCmd = &cobra.Command{ + Args: cobra.ExactArgs(4), + Use: "update", + Short: "Update VTEP Policy entries", + Aliases: []string{"add"}, + Long: vtepPolUpdateUsage, + Run: func(cmd *cobra.Command, args []string) { + common.RequireRootPrivilege("cilium bpf vtep-policy update ") + + vtep, err := vtep_policy.OpenPinnedVtepPolicyMap(log) + if err != nil { + Fatalf("Unable to open map: %s", err) + } + + src_ip, err := netip.ParseAddr(args[0]) + if err != nil { + Fatalf("Unable to parse IP '%s'", args[0]) + } + + dst_cidr, err := netip.ParsePrefix(args[1]) + if err != nil { + Fatalf("error parsing cidr %s: %s", args[1], err) + } + + vtep_ip, err := netip.ParseAddr(args[2]) + if err != nil { + Fatalf("Unable to parse IP '%s'", args[2]) + } + + rmac, err := mac.ParseMAC(args[3]) + if err != nil { + Fatalf("Unable to parse vtep mac '%s'", args[3]) + } + + if err := vtep.UpdateVtepPolicyMapping(src_ip, dst_cidr, vtep_ip, rmac); err != nil { + fmt.Fprintf(os.Stderr, "error updating contents of map: %s\n", err) + os.Exit(1) + } + }, +} + +func init() { + BPFVtepPolicyCmd.AddCommand(bpfVtepPolicyUpdateCmd) +} diff --git a/cilium-dbg/cmd/metrics_list.go b/cilium-dbg/cmd/metrics_list.go index d835312c8efd4..f80f2400606ff 100644 --- a/cilium-dbg/cmd/metrics_list.go +++ b/cilium-dbg/cmd/metrics_list.go @@ -32,4 +32,5 @@ func init() { MetricsCmd.AddCommand(MetricsListCmd) MetricsListCmd.Flags().StringVarP(&matchPattern, "match-pattern", "p", "", "Show only metrics whose names match matchpattern") command.AddOutputOption(MetricsListCmd) + shell.AddShellSockOption(MetricsListCmd) } diff --git a/cilium-dbg/cmd/post_uninstall_cleanup.go b/cilium-dbg/cmd/post_uninstall_cleanup.go index 3db5fef33bad6..59a7fcaacd632 100644 --- a/cilium-dbg/cmd/post_uninstall_cleanup.go +++ b/cilium-dbg/cmd/post_uninstall_cleanup.go @@ -60,16 +60,15 @@ const ( ) const ( - ciliumLinkPrefix = "cilium_" - ciliumNetNSPrefix = "cilium-" - hostLinkPrefix = "lxc" - hostLinkLen = len(hostLinkPrefix + "XXXXX") - cniPath = "/etc/cni/net.d" - cniConfigV1 = cniPath + "/10-cilium-cni.conf" - cniConfigV2 = cniPath + "/00-cilium-cni.conf" - cniConfigV3 = cniPath + "/05-cilium-cni.conf" - cniConfigV4 = cniPath + "/05-cilium.conf" - cniConfigV5 = cniPath + "/05-cilium.conflist" + ciliumLinkPrefix = "cilium_" + hostLinkPrefix = "lxc" + hostLinkLen = len(hostLinkPrefix + "XXXXX") + cniPath = "/etc/cni/net.d" + cniConfigV1 = cniPath + "/10-cilium-cni.conf" + cniConfigV2 = cniPath + 
"/00-cilium-cni.conf" + cniConfigV3 = cniPath + "/05-cilium-cni.conf" + cniConfigV4 = cniPath + "/05-cilium.conf" + cniConfigV5 = cniPath + "/05-cilium.conflist" ) func init() { diff --git a/clustermesh-apiserver/kvstoremesh/cells.go b/clustermesh-apiserver/kvstoremesh/cells.go index 4ff8ce40d17ce..5e60e9a957684 100644 --- a/clustermesh-apiserver/kvstoremesh/cells.go +++ b/clustermesh-apiserver/kvstoremesh/cells.go @@ -37,6 +37,7 @@ var Cell = cell.Module( }), heartbeat.Cell, + cell.Provide(kvstoremesh.NewSyncWaiter), cell.Invoke(func(*kvstoremesh.KVStoreMesh) {}), ), diff --git a/clustermesh-apiserver/kvstoremesh/root.go b/clustermesh-apiserver/kvstoremesh/root.go index c3bc3065e6bfe..6807bc9ca391b 100644 --- a/clustermesh-apiserver/kvstoremesh/root.go +++ b/clustermesh-apiserver/kvstoremesh/root.go @@ -11,11 +11,9 @@ import ( "time" "github.com/cilium/hive/cell" - "github.com/cilium/hive/job" "github.com/prometheus/client_golang/prometheus" "github.com/spf13/cobra" - "github.com/cilium/cilium/clustermesh-apiserver/syncstate" "github.com/cilium/cilium/pkg/clustermesh/kvstoremesh" "github.com/cilium/cilium/pkg/hive" "github.com/cilium/cilium/pkg/kvstore" @@ -70,10 +68,8 @@ type params struct { Metrics kvstoremesh.Metrics Shutdowner hive.Shutdowner Log *slog.Logger - SyncState syncstate.SyncState - JobGroup job.Group + SyncWaiter kvstoremesh.SyncWaiter KVStoreMesh *kvstoremesh.KVStoreMesh - Health cell.Health } func registerLeaderElectionHooks(lc cell.Lifecycle, llc *LeaderLifecycle, params params) { @@ -120,7 +116,7 @@ func runLeaderElection(ctx context.Context, lc *LeaderLifecycle, params params) if err != nil && errors.Is(err, kvstore.ErrEtcdTimeout) { // signal readiness - params.SyncState.Stop() + params.SyncWaiter.ForceReady() // try again with infinite timeout params.Log.Info("Reattempting to acquire leader election lock") @@ -148,15 +144,6 @@ func runLeaderElection(ctx context.Context, lc *LeaderLifecycle, params params) params.Log.Info("Leader election lock acquired") params.Metrics.LeaderElectionStatus.With(prometheus.Labels{metrics.LabelLeaderElectionName: "kvstoremesh"}).Set(float64(1)) - kvstoremesh.RegisterSyncWaiter(kvstoremesh.SyncWaiterParams{ - KVStoreMesh: params.KVStoreMesh, - SyncState: params.SyncState, - Lifecycle: lc, - JobGroup: params.JobGroup, - Health: params.Health, - }) - params.SyncState.Stop() - err = lc.Start(params.Log, ctx) if err != nil { params.Log.Error("Failed to run KvStoreMesh", logfields.Error, err) diff --git a/contrib/scripts/k8s-manifests-gen.sh b/contrib/scripts/k8s-manifests-gen.sh index ffb18e7be5cad..22f8414cff636 100755 --- a/contrib/scripts/k8s-manifests-gen.sh +++ b/contrib/scripts/k8s-manifests-gen.sh @@ -40,7 +40,8 @@ CRDS_CILIUM_V2ALPHA1="ciliumendpointslices \ ciliumbgppeeringpolicies \ ciliuml2announcementpolicies \ ciliumpodippools \ - ciliumgatewayclassconfigs" + ciliumgatewayclassconfigs \ + ciliumvteppolicies" TMPDIR=$(mktemp -d -t cilium.tmpXXXXXXXX) go run sigs.k8s.io/controller-tools/cmd/controller-gen ${CRD_OPTIONS} paths="${CRD_PATHS}" output:crd:artifacts:config="${TMPDIR}" diff --git a/contrib/testing/kind-values.yaml b/contrib/testing/kind-values.yaml index 9be35a51f3fe0..29473b60e2692 100644 --- a/contrib/testing/kind-values.yaml +++ b/contrib/testing/kind-values.yaml @@ -6,3 +6,6 @@ operator: override: "localhost:5000/cilium/operator-generic:local" pullPolicy: Never suffix: "" +vtep: + enabled: true + mask: "255.255.255.0" diff --git a/daemon/cmd/cells.go b/daemon/cmd/cells.go index 2024e1fea86b7..80730fa5e69af 100644 
--- a/daemon/cmd/cells.go +++ b/daemon/cmd/cells.go @@ -13,6 +13,8 @@ import ( "github.com/cilium/statedb" "google.golang.org/grpc" + "github.com/cilium/cilium/pkg/healthconfig" + healthApi "github.com/cilium/cilium/api/v1/health/server" "github.com/cilium/cilium/api/v1/server" "github.com/cilium/cilium/daemon/cmd/cni" @@ -78,6 +80,8 @@ import ( "github.com/cilium/cilium/pkg/signal" "github.com/cilium/cilium/pkg/source" "github.com/cilium/cilium/pkg/status" + "github.com/cilium/cilium/pkg/svcrouteconfig" + "github.com/cilium/cilium/pkg/vteppolicy" ) var ( @@ -332,11 +336,19 @@ var ( // Cilium health infrastructure (host and endpoint connectivity) health.Cell, + // Cilium health config + healthconfig.Cell, + // Cilium Status Collector status.Cell, // Cilium Debuginfo API debugapi.Cell, + + svcrouteconfig.Cell, + + // VTEP Policy allows two-way communication with an external VXLAN gateway + vteppolicy.Cell, ) ) diff --git a/daemon/cmd/daemon.go b/daemon/cmd/daemon.go index e64ce47be9f31..398007fcf65b0 100644 --- a/daemon/cmd/daemon.go +++ b/daemon/cmd/daemon.go @@ -23,7 +23,6 @@ import ( linuxrouting "github.com/cilium/cilium/pkg/datapath/linux/routing" "github.com/cilium/cilium/pkg/datapath/linux/safenetlink" datapathTables "github.com/cilium/cilium/pkg/datapath/tables" - "github.com/cilium/cilium/pkg/datapath/tunnel" datapath "github.com/cilium/cilium/pkg/datapath/types" "github.com/cilium/cilium/pkg/debug" "github.com/cilium/cilium/pkg/defaults" @@ -34,6 +33,7 @@ import ( "github.com/cilium/cilium/pkg/endpoint/regeneration" "github.com/cilium/cilium/pkg/endpointmanager" "github.com/cilium/cilium/pkg/health" + "github.com/cilium/cilium/pkg/healthconfig" "github.com/cilium/cilium/pkg/identity" identitycell "github.com/cilium/cilium/pkg/identity/cache/cell" "github.com/cilium/cilium/pkg/identity/identitymanager" @@ -133,6 +133,8 @@ type Daemon struct { lbConfig loadbalancer.Config kprCfg kpr.KPRConfig + healthConfig healthconfig.CiliumHealthConfig + ipsecAgent datapath.IPsecAgent } @@ -227,17 +229,6 @@ func newDaemon(ctx context.Context, cleaner *daemonCleanup, params *daemonParams bootstrapStats.daemonInit.Start() - // EncryptedOverlay feature must check the TunnelProtocol if enabled, since - // it only supports VXLAN right now. - if params.IPsecAgent.Enabled() && params.IPSecConfig.EncryptedOverlayEnabled() { - if !option.Config.TunnelingEnabled() { - return nil, nil, fmt.Errorf("EncryptedOverlay support requires VXLAN tunneling mode") - } - if params.TunnelConfig.EncapProtocol() != tunnel.VXLAN { - return nil, nil, fmt.Errorf("EncryptedOverlay support requires VXLAN tunneling protocol") - } - } - // WireGuard and IPSec are mutually exclusive. if params.IPsecAgent.Enabled() && params.WGAgent.Enabled() { return nil, nil, fmt.Errorf("WireGuard (--%s) cannot be used with IPsec (--%s)", wgTypes.EnableWireguard, datapath.EnableIPSec) @@ -255,10 +246,6 @@ func newDaemon(ctx context.Context, cleaner *daemonCleanup, params *daemonParams } } - if params.IPSecConfig.EncryptedOverlayEnabled() && !params.IPsecAgent.Enabled() { - params.Logger.Warn("IPSec encrypted overlay is enabled but IPSec is not. 
Ignoring option.") - } - if option.Config.EnableHostFirewall { if params.IPsecAgent.Enabled() { return nil, nil, fmt.Errorf("IPSec cannot be used with the host firewall.") @@ -346,6 +333,7 @@ func newDaemon(ctx context.Context, cleaner *daemonCleanup, params *daemonParams ipsecAgent: params.IPsecAgent, ciliumHealth: params.CiliumHealth, endpointAPIFence: params.EndpointAPIFence, + healthConfig: params.HealthConfig, } // initialize endpointRestoreComplete channel as soon as possible so that subsystems diff --git a/daemon/cmd/daemon_main.go b/daemon/cmd/daemon_main.go index 876a678d21400..a57792d20b4ea 100644 --- a/daemon/cmd/daemon_main.go +++ b/daemon/cmd/daemon_main.go @@ -53,6 +53,7 @@ import ( "github.com/cilium/cilium/pkg/fqdn/bootstrap" "github.com/cilium/cilium/pkg/fqdn/namemanager" "github.com/cilium/cilium/pkg/health" + "github.com/cilium/cilium/pkg/healthconfig" "github.com/cilium/cilium/pkg/hive" "github.com/cilium/cilium/pkg/identity" identitycell "github.com/cilium/cilium/pkg/identity/cache/cell" @@ -227,18 +228,12 @@ func InitGlobalFlags(logger *slog.Logger, cmd *cobra.Command, vp *viper.Viper) { flags.Bool(option.EnableEndpointRoutes, defaults.EnableEndpointRoutes, "Use per endpoint routes instead of routing via cilium_host") option.BindEnv(vp, option.EnableEndpointRoutes) - flags.Bool(option.EnableHealthChecking, defaults.EnableHealthChecking, "Enable connectivity health checking") - option.BindEnv(vp, option.EnableHealthChecking) - flags.Bool(option.AgentHealthRequireK8sConnectivity, true, "Require Kubernetes connectivity in agent health endpoint") option.BindEnv(vp, option.AgentHealthRequireK8sConnectivity) flags.Bool(option.EnableHealthCheckLoadBalancerIP, defaults.EnableHealthCheckLoadBalancerIP, "Enable access of the healthcheck nodePort on the LoadBalancerIP. Needs --enable-health-check-nodeport to be enabled") option.BindEnv(vp, option.EnableHealthCheckLoadBalancerIP) - flags.Bool(option.EnableEndpointHealthChecking, defaults.EnableEndpointHealthChecking, "Enable connectivity health checking between virtual endpoints") - option.BindEnv(vp, option.EnableEndpointHealthChecking) - flags.Int(option.HealthCheckICMPFailureThreshold, defaults.HealthCheckICMPFailureThreshold, "Number of ICMP requests sent for each run of the health checker. If at least one ICMP response is received, the node or endpoint is marked as healthy.") option.BindEnv(vp, option.HealthCheckICMPFailureThreshold) @@ -871,6 +866,9 @@ func InitGlobalFlags(logger *slog.Logger, cmd *cobra.Command, vp *viper.Viper) { flags.Bool(option.EnableExtendedIPProtocols, defaults.EnableExtendedIPProtocols, "Enable traffic with extended IP protocols in datapath") option.BindEnv(vp, option.EnableExtendedIPProtocols) + flags.Uint8(option.IPTracingOptionType, 0, "Specifies what IPv4 option type should be used to extract trace information from a packet; a value of 0 (default) disables IP tracing.") + option.BindEnv(vp, option.IPTracingOptionType) + if err := vp.BindPFlags(flags); err != nil { logging.Fatal(logger, "BindPFlags failed", logfields.Error, err) } @@ -947,10 +945,6 @@ func initEnv(logger *slog.Logger, vp *viper.Viper) { logger.Warn("Unknown verbose debug group", logfields.Group, grp) } } - // Enable policy debugging if debug is enabled. 
- if option.Config.Debug { - option.Config.Opts.SetBool(option.DebugPolicy, true) - } common.RequireRootPrivilege("cilium-agent") @@ -1314,6 +1308,7 @@ type daemonParams struct { KPRConfig kpr.KPRConfig EndpointAPIFence endpointapi.Fence IPSecConfig datapath.IPsecConfig + HealthConfig healthconfig.CiliumHealthConfig } func newDaemonPromise(params daemonParams) (promise.Promise[*Daemon], legacy.DaemonInitialization) { @@ -1513,7 +1508,7 @@ func startDaemon(d *Daemon, restoredEndpoints *endpointRestoreState, cleaner *da } bootstrapStats.healthCheck.Start() - if option.Config.EnableHealthChecking { + if d.healthConfig.IsHealthCheckingEnabled() { if err := d.ciliumHealth.Init(d.ctx, d.healthEndpointRouting, cleaner.cleanupFuncs.Add); err != nil { return fmt.Errorf("failed to initialize cilium health: %w", err) } diff --git a/examples/kubernetes/nginx-no-track-host-ports.yaml b/examples/kubernetes/nginx-no-track-host-ports.yaml new file mode 100644 index 0000000000000..be57d9d62f60e --- /dev/null +++ b/examples/kubernetes/nginx-no-track-host-ports.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: example + annotations: + network.cilium.io/no-track-host-ports: "8080/tcp,80/tcp,443/tcp" +spec: + hostNetwork: true + containers: + - image: nginx + imagePullPolicy: Always + name: nginx diff --git a/go.mod b/go.mod index 07d4be0e64e7f..71690fbdbc3ae 100644 --- a/go.mod +++ b/go.mod @@ -4,19 +4,19 @@ go 1.25.0 require ( github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.0 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7 v7.0.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v7 v7.0.0 github.com/aliyun/alibaba-cloud-sdk-go v1.63.107 - github.com/aws/aws-sdk-go-v2 v1.38.3 - github.com/aws/aws-sdk-go-v2/config v1.31.6 - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.6 - github.com/aws/aws-sdk-go-v2/service/ec2 v1.251.0 + github.com/aws/aws-sdk-go-v2 v1.39.0 + github.com/aws/aws-sdk-go-v2/config v1.31.8 + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.7 + github.com/aws/aws-sdk-go-v2/service/ec2 v1.253.0 github.com/aws/smithy-go v1.23.0 github.com/blang/semver/v4 v4.0.0 github.com/cespare/xxhash/v2 v2.3.0 - github.com/cilium/charts v0.0.0-20250815135331-8443b5068217 + github.com/cilium/charts v0.0.0-20250904103431-47fb6519b6bb github.com/cilium/coverbee v0.3.3-0.20240723084546-664438750fce github.com/cilium/dns v1.1.51-0.20240603182237-af788769786a github.com/cilium/ebpf v0.19.1-0.20250818092626-ae226118949d @@ -32,7 +32,7 @@ require ( github.com/containernetworking/cni v1.3.0 github.com/coreos/go-systemd/v22 v22.6.0 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc - github.com/docker/docker v28.3.3+incompatible + github.com/docker/docker v28.4.0+incompatible github.com/docker/libnetwork v0.8.0-dev.2.0.20210525090646-64b7a4574d14 github.com/envoyproxy/go-control-plane/contrib v1.32.5-0.20250809052208-d8ab4c219945 github.com/envoyproxy/go-control-plane/envoy v1.32.5-0.20250809052208-d8ab4c219945 @@ -48,7 +48,7 @@ require ( github.com/go-openapi/swag v0.24.1 github.com/go-openapi/validate v0.24.0 github.com/gogo/protobuf v1.3.2 - github.com/google/cel-go v0.26.0 + github.com/google/cel-go v0.26.1 github.com/google/go-cmp v0.7.0 github.com/google/go-github/v74 v74.0.0 
github.com/google/go-licenses v1.6.1-0.20230903011517-706b9c60edd4 @@ -80,17 +80,17 @@ require ( github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 github.com/prometheus-community/pro-bing v0.7.0 - github.com/prometheus/client_golang v1.23.0 + github.com/prometheus/client_golang v1.23.2 github.com/prometheus/client_model v0.6.2 - github.com/prometheus/common v0.65.0 + github.com/prometheus/common v0.66.1 github.com/prometheus/procfs v0.17.0 github.com/russross/blackfriday/v2 v2.1.0 - github.com/sasha-s/go-deadlock v0.3.5 + github.com/sasha-s/go-deadlock v0.3.6 github.com/sirupsen/logrus v1.9.3 github.com/spf13/afero v1.14.0 github.com/spf13/cast v1.9.2 - github.com/spf13/cobra v1.9.1 - github.com/spf13/pflag v1.0.7 + github.com/spf13/cobra v1.10.1 + github.com/spf13/pflag v1.0.10 github.com/spf13/viper v1.20.1 github.com/spiffe/go-spiffe/v2 v2.6.0 github.com/spiffe/spire-api-sdk v1.12.4 @@ -109,33 +109,34 @@ require ( go.yaml.in/yaml/v3 v3.0.4 go4.org/netipx v0.0.0-20231129151722-fdeea329fbba golang.org/x/crypto v0.41.0 - golang.org/x/mod v0.27.0 + golang.org/x/mod v0.28.0 golang.org/x/net v0.43.0 - golang.org/x/oauth2 v0.30.0 - golang.org/x/sync v0.16.0 - golang.org/x/sys v0.35.0 - golang.org/x/term v0.34.0 - golang.org/x/text v0.28.0 - golang.org/x/time v0.12.0 + golang.org/x/oauth2 v0.31.0 + golang.org/x/sync v0.17.0 + golang.org/x/sys v0.36.0 + golang.org/x/term v0.35.0 + golang.org/x/text v0.29.0 + golang.org/x/time v0.13.0 golang.org/x/tools v0.36.0 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20241231184526-a9ab2273dd10 - google.golang.org/genproto/googleapis/rpc v0.0.0-20250818200422-3122310a409c + google.golang.org/genproto/googleapis/rpc v0.0.0-20250826171959-ef028d996bc1 google.golang.org/grpc v1.75.0 google.golang.org/protobuf v1.36.8 - helm.sh/helm/v3 v3.18.5 - k8s.io/api v0.34.0 - k8s.io/apiextensions-apiserver v0.34.0 - k8s.io/apimachinery v0.34.0 - k8s.io/cli-runtime v0.34.0 - k8s.io/client-go v0.34.0 - k8s.io/component-base v0.34.0 + helm.sh/helm/v3 v3.18.6 + k8s.io/api v0.34.1 + k8s.io/apiextensions-apiserver v0.34.1 + k8s.io/apimachinery v0.34.1 + k8s.io/cli-runtime v0.34.1 + k8s.io/client-go v0.34.1 + k8s.io/component-base v0.34.1 k8s.io/endpointslice v0.34.0 k8s.io/klog/v2 v2.130.1 - k8s.io/kubectl v0.34.0 + k8s.io/kubectl v0.34.1 + k8s.io/metrics v0.34.1 k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d sigs.k8s.io/controller-runtime v0.21.0 sigs.k8s.io/gateway-api v1.3.1-0.20250611112659-17a60f668a0d - sigs.k8s.io/mcs-api v0.1.1-0.20250610011024-38bab5ba476b + sigs.k8s.io/mcs-api v0.3.1-0.20250908090929-79efdd37ed2b sigs.k8s.io/mcs-api/controllers v0.0.0-20250731081715-a807ec696257 sigs.k8s.io/yaml v1.6.0 ) @@ -146,7 +147,7 @@ require ( github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20250520111509-a70c2aa677fa // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 // indirect github.com/BurntSushi/toml v1.5.0 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect @@ -157,15 +158,15 @@ require ( github.com/alecthomas/participle/v2 v2.1.0 // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - 
github.com/aws/aws-sdk-go-v2/credentials v1.18.10 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.6 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.6 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.18.12 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.7 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.7 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.6 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.29.1 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.2 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.38.2 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.7 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.29.3 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.4 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.38.4 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect github.com/cilium/deepequal-gen v0.0.0-20241016021505-f57df2fe2e62 // indirect @@ -273,7 +274,7 @@ require ( github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.2.3 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect - github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 // indirect + github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe // indirect github.com/pkg/errors v0.9.1 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/rivo/uniseg v0.4.4 // indirect @@ -314,8 +315,8 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools/v3 v3.5.0 // indirect - k8s.io/apiserver v0.34.0 // indirect - k8s.io/code-generator v0.34.0 // indirect + k8s.io/apiserver v0.34.1 // indirect + k8s.io/code-generator v0.34.1 // indirect k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f // indirect k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect oras.land/oras-go/v2 v2.6.0 // indirect diff --git a/go.sum b/go.sum index 43be80348793c..6619ccef2120f 100644 --- a/go.sum +++ b/go.sum @@ -11,10 +11,10 @@ github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8af github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20250520111509-a70c2aa677fa h1:x6kFzdPgBoLbyoNkA/jny0ENpoEz4wqY8lPTQL2DPkg= github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20250520111509-a70c2aa677fa/go.mod h1:gCLVsLfv1egrcZu+GoJATN5ts75F2s62ih/457eWzOw= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.0 h1:ci6Yd6nysBRLEodoziB6ah1+YOzZbZk+NYneoA6q+6E= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.0/go.mod h1:QyVsSSN64v5TGltphKLQ2sQxe4OBQg0J1eKRcVBnfgE= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0 h1:MhRfI58HblXzCtWEZCO0feHs8LweePB3s90r7WaR1KU= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0/go.mod h1:okZ+ZURbArNdlJ+ptXoyHNuOETzOl1Oww19rm8I2WLA= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 h1:5YTBM8QDVIBN3sxBil89WfdAAqDZbyJTgh688DSxX5w= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0 
h1:wL5IEG5zb7BVv1Kv0Xm92orq+5hB5Nipn3B5tn4Rqfk= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0/go.mod h1:J7MUC/wtRpfGVbQ5sIItY5/FuVWmvzlY21WAOfQnq/I= github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY= github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8= github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA= @@ -35,8 +35,8 @@ github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEK github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= -github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs= -github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 h1:XkkQbfMyuH2jTSjQjSoihryI8GINRcs4xp8lNawg0FI= +github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= @@ -76,32 +76,32 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-sdk-go-v2 v1.38.3 h1:B6cV4oxnMs45fql4yRH+/Po/YU+597zgWqvDpYMturk= -github.com/aws/aws-sdk-go-v2 v1.38.3/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY= -github.com/aws/aws-sdk-go-v2/config v1.31.6 h1:a1t8fXY4GT4xjyJExz4knbuoxSCacB5hT/WgtfPyLjo= -github.com/aws/aws-sdk-go-v2/config v1.31.6/go.mod h1:5ByscNi7R+ztvOGzeUaIu49vkMk2soq5NaH5PYe33MQ= -github.com/aws/aws-sdk-go-v2/credentials v1.18.10 h1:xdJnXCouCx8Y0NncgoptztUocIYLKeQxrCgN6x9sdhg= -github.com/aws/aws-sdk-go-v2/credentials v1.18.10/go.mod h1:7tQk08ntj914F/5i9jC4+2HQTAuJirq7m1vZVIhEkWs= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.6 h1:wbjnrrMnKew78/juW7I2BtKQwa1qlf6EjQgS69uYY14= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.6/go.mod h1:AtiqqNrDioJXuUgz3+3T0mBWN7Hro2n9wll2zRUc0ww= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.6 h1:uF68eJA6+S9iVr9WgX1NaRGyQ/6MdIyc4JNUo6TN1FA= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.6/go.mod h1:qlPeVZCGPiobx8wb1ft0GHT5l+dc6ldnwInDFaMvC7Y= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.6 h1:pa1DEC6JoI0zduhZePp3zmhWvk/xxm4NB8Hy/Tlsgos= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.6/go.mod h1:gxEjPebnhWGJoaDdtDkA0JX46VRg1wcTHYe63OfX5pE= +github.com/aws/aws-sdk-go-v2 v1.39.0 h1:xm5WV/2L4emMRmMjHFykqiA4M/ra0DJVSWUkDyBjbg4= +github.com/aws/aws-sdk-go-v2 v1.39.0/go.mod 
h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY= +github.com/aws/aws-sdk-go-v2/config v1.31.8 h1:kQjtOLlTU4m4A64TsRcqwNChhGCwaPBt+zCQt/oWsHU= +github.com/aws/aws-sdk-go-v2/config v1.31.8/go.mod h1:QPpc7IgljrKwH0+E6/KolCgr4WPLerURiU592AYzfSY= +github.com/aws/aws-sdk-go-v2/credentials v1.18.12 h1:zmc9e1q90wMn8wQbjryy8IwA6Q4XlaL9Bx2zIqdNNbk= +github.com/aws/aws-sdk-go-v2/credentials v1.18.12/go.mod h1:3VzdRDR5u3sSJRI4kYcOSIBbeYsgtVk7dG5R/U6qLWY= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.7 h1:Is2tPmieqGS2edBnmOJIbdvOA6Op+rRpaYR60iBAwXM= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.7/go.mod h1:F1i5V5421EGci570yABvpIXgRIBPb5JM+lSkHF6Dq5w= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.7 h1:UCxq0X9O3xrlENdKf1r9eRJoKz/b0AfGkpp3a7FPlhg= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.7/go.mod h1:rHRoJUNUASj5Z/0eqI4w32vKvC7atoWR0jC+IkmVH8k= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.7 h1:Y6DTZUn7ZUC4th9FMBbo8LVE+1fyq3ofw+tRwkUd3PY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.7/go.mod h1:x3XE6vMnU9QvHN/Wrx2s44kwzV2o2g5x/siw4ZUJ9g8= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.251.0 h1:hGHSNZDTFnhLGUpRkQORM8uBY9R/FOkxCkuUUJBEOQ4= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.251.0/go.mod h1:SmMqzfS4HVsOD58lwLZ79oxF58f8zVe5YdK3o+/o1Ck= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.253.0 h1:x0v1n45AT+uZvNoQI8xtegVUOZoQIF+s9qwNcl7Ivyg= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.253.0/go.mod h1:MXJiLJZtMqb2dVXgEIn35d5+7MqLd4r8noLen881kpk= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.6 h1:LHS1YAIJXJ4K9zS+1d/xa9JAA9sL2QyXIQCQFQW/X08= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.6/go.mod h1:c9PCiTEuh0wQID5/KqA32J+HAgZxN9tOGXKCiYJjTZI= -github.com/aws/aws-sdk-go-v2/service/sso v1.29.1 h1:8OLZnVJPvjnrxEwHFg9hVUof/P4sibH+Ea4KKuqAGSg= -github.com/aws/aws-sdk-go-v2/service/sso v1.29.1/go.mod h1:27M3BpVi0C02UiQh1w9nsBEit6pLhlaH3NHna6WUbDE= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.2 h1:gKWSTnqudpo8dAxqBqZnDoDWCiEh/40FziUjr/mo6uA= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.2/go.mod h1:x7+rkNmRoEN1U13A6JE2fXne9EWyJy54o3n6d4mGaXQ= -github.com/aws/aws-sdk-go-v2/service/sts v1.38.2 h1:YZPjhyaGzhDQEvsffDEcpycq49nl7fiGcfJTIo8BszI= -github.com/aws/aws-sdk-go-v2/service/sts v1.38.2/go.mod h1:2dIN8qhQfv37BdUYGgEC8Q3tteM3zFxTI1MLO2O3J3c= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.7 h1:mLgc5QIgOy26qyh5bvW+nDoAppxgn3J2WV3m9ewq7+8= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.7/go.mod h1:wXb/eQnqt8mDQIQTTmcw58B5mYGxzLGZGK8PWNFZ0BA= +github.com/aws/aws-sdk-go-v2/service/sso v1.29.3 h1:7PKX3VYsZ8LUWceVRuv0+PU+E7OtQb1lgmi5vmUE9CM= +github.com/aws/aws-sdk-go-v2/service/sso v1.29.3/go.mod h1:Ql6jE9kyyWI5JHn+61UT/Y5Z0oyVJGmgmJbZD5g4unY= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.4 h1:e0XBRn3AptQotkyBFrHAxFB8mDhAIOfsG+7KyJ0dg98= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.4/go.mod h1:XclEty74bsGBCr1s0VSaA11hQ4ZidK4viWK7rRfO88I= +github.com/aws/aws-sdk-go-v2/service/sts v1.38.4 
h1:PR00NXRYgY4FWHqOGx3fC3lhVKjsp1GdloDv2ynMSd8= +github.com/aws/aws-sdk-go-v2/service/sts v1.38.4/go.mod h1:Z+Gd23v97pX9zK97+tX4ppAgqCt3Z2dIXB02CtBncK8= github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE= github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -118,8 +118,8 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= -github.com/cilium/charts v0.0.0-20250815135331-8443b5068217 h1:selLLU2HQb1FGxHdGF3o/m0s8fD1JMgc6paMmhwdrBA= -github.com/cilium/charts v0.0.0-20250815135331-8443b5068217/go.mod h1:M3C9VOlFvRzuV+a01t07Tw4uFLSfkCH3L542IWjf6BU= +github.com/cilium/charts v0.0.0-20250904103431-47fb6519b6bb h1:mqBXIakWvZaVjxTWW+iyWbGqWRuorB6Sbw+AVSeop38= +github.com/cilium/charts v0.0.0-20250904103431-47fb6519b6bb/go.mod h1:M3C9VOlFvRzuV+a01t07Tw4uFLSfkCH3L542IWjf6BU= github.com/cilium/controller-tools v0.16.5-1 h1:aJLd2riNwIP+qVwA9bFBu2GDk6fQVecJt/0bZF2IYOw= github.com/cilium/controller-tools v0.16.5-1/go.mod h1:8vztuRVzs8IuuJqKqbXCSlXcw+lkAv/M2sTpg55qjMY= github.com/cilium/coverbee v0.3.3-0.20240723084546-664438750fce h1:gqzXY3NuHllVVDw9vD49mlXx+9bYFPlg23rdrkQNFDM= @@ -200,8 +200,8 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= -github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI= -github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v28.4.0+incompatible h1:KVC7bz5zJY/4AZe/78BIvCnPsLaC9T/zh72xnlrTTOk= +github.com/docker/docker v28.4.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= @@ -357,8 +357,8 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI= -github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM= +github.com/google/cel-go v0.26.1 h1:iPbVVEdkhTX++hpe3lzSk7D3G3QSYqLGoHOcEio+UXQ= +github.com/google/cel-go v0.26.1/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM= github.com/google/certificate-transparency-go v1.1.7 h1:IASD+NtgSTJLPdzkthwvAG1ZVbF2WtFg4IvoA68XGSw= github.com/google/certificate-transparency-go v1.1.7/go.mod h1:FSSBo8fyMVgqptbfF6j5p/XNdgQftAhSmXcIxV9iphE= github.com/google/gnostic-models v0.7.0 
h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= @@ -602,8 +602,8 @@ github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNH github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 h1:Dx7Ovyv/SFnMFw3fD4oEoeorXc6saIiQ23LrGLth0Gw= -github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= +github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe h1:vHpqOnPlnkba8iSxU4j/CvDSS9J4+F4473esQsYLGoE= +github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= @@ -619,13 +619,13 @@ github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= github.com/prometheus-community/pro-bing v0.7.0 h1:KFYFbxC2f2Fp6c+TyxbCOEarf7rbnzr9Gw8eIb0RfZA= github.com/prometheus-community/pro-bing v0.7.0/go.mod h1:Moob9dvlY50Bfq6i88xIwfyw7xLFHH69LUgx9n5zqCE= -github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= -github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= -github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= -github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= +github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= +github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 h1:EaDatTxkdHG+U3Bk4EUr+DZ7fOGwTfezUiUJMaIcaho= @@ -648,8 +648,8 @@ github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsF github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k= github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ= github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= -github.com/sasha-s/go-deadlock v0.3.5 h1:tNCOEEDG6tBqrNDOX35j/7hL5FcFViG6awUGROb2NsU= -github.com/sasha-s/go-deadlock v0.3.5/go.mod h1:bugP6EGbdGYObIlx7pUZtWqlvo8k9H6vCBBsiChJQ5U= 
+github.com/sasha-s/go-deadlock v0.3.6 h1:TR7sfOnZ7x00tWPfD397Peodt57KzMDo+9Ae9rMiUmw= +github.com/sasha-s/go-deadlock v0.3.6/go.mod h1:CUqNyyvMxTyjFqDT7MRg9mb4Dv/btmGTqSR+rky/UXo= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= @@ -666,11 +666,11 @@ github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA= github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo= github.com/spf13/cast v1.9.2 h1:SsGfm7M8QOFtEzumm7UZrZdLLquNdzFYfIbEXntcFbE= github.com/spf13/cast v1.9.2/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= -github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= -github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= -github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M= -github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= @@ -846,8 +846,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= -golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= +golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U= +golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -874,8 +874,8 @@ golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= -golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= +golang.org/x/oauth2 v0.31.0 h1:8Fq0yVZLh4j4YA47vHKFTa9Ew5XIrCP8LC6UeNZnLxo= +golang.org/x/oauth2 v0.31.0/go.mod 
h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -885,8 +885,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= -golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -920,25 +920,25 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= -golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= +golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= -golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= +golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ= +golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= -golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= -golang.org/x/time v0.12.0 
h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= -golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= +golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= +golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI= +golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -983,8 +983,8 @@ google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto/googleapis/api v0.0.0-20250728155136-f173205681a0 h1:0UOBWO4dC+e51ui0NFKSPbkHHiQ4TmrEfEZMLDyRmY8= google.golang.org/genproto/googleapis/api v0.0.0-20250728155136-f173205681a0/go.mod h1:8ytArBbtOy2xfht+y2fqKd5DRDJRUQhqbyEnQ4bDChs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250818200422-3122310a409c h1:qXWI/sQtv5UKboZ/zUk7h+mrf/lXORyI+n9DKDAusdg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250818200422-3122310a409c/go.mod h1:gw1tLEfykwDz2ET4a12jcXt4couGAm7IwsVaTy0Sflo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250826171959-ef028d996bc1 h1:pmJpJEvT846VzausCQ5d7KreSROcDqmO388w5YbnltA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250826171959-ef028d996bc1/go.mod h1:GmFNa4BdJZ2a8G+wCe9Bg3wwThLrJun751XstdJt5Og= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= @@ -1038,26 +1038,26 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= -helm.sh/helm/v3 v3.18.5 h1:Cc3Z5vd6kDrZq9wO9KxKLNEickiTho6/H/dBNRVSos4= -helm.sh/helm/v3 v3.18.5/go.mod h1:L/dXDR2r539oPlFP1PJqKAC1CUgqHJDLkxKpDGrWnyg= +helm.sh/helm/v3 v3.18.6 h1:S/2CqcYnNfLckkHLI0VgQbxgcDaU3N4A/46E3n9wSNY= +helm.sh/helm/v3 v3.18.6/go.mod h1:L/dXDR2r539oPlFP1PJqKAC1CUgqHJDLkxKpDGrWnyg= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.34.0 h1:L+JtP2wDbEYPUeNGbeSa/5GwFtIA662EmT2YSLOkAVE= -k8s.io/api v0.34.0/go.mod h1:YzgkIzOOlhl9uwWCZNqpw6RJy9L2FK4dlJeayUoydug= -k8s.io/apiextensions-apiserver v0.34.0 h1:B3hiB32jV7BcyKcMU5fDaDxk882YrJ1KU+ZSkA9Qxoc= -k8s.io/apiextensions-apiserver v0.34.0/go.mod h1:hLI4GxE1BDBy9adJKxUxCEHBGZtGfIg98Q+JmTD7+g0= -k8s.io/apimachinery v0.34.0 h1:eR1WO5fo0HyoQZt1wdISpFDffnWOvFLOOeJ7MgIv4z0= -k8s.io/apimachinery v0.34.0/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= -k8s.io/apiserver v0.34.0 h1:Z51fw1iGMqN7uJ1kEaynf2Aec1Y774PqU+FVWCFV3Jg= -k8s.io/apiserver v0.34.0/go.mod h1:52ti5YhxAvewmmpVRqlASvaqxt0gKJxvCeW7ZrwgazQ= -k8s.io/cli-runtime v0.34.0 
h1:N2/rUlJg6TMEBgtQ3SDRJwa8XyKUizwjlOknT1mB2Cw= -k8s.io/cli-runtime v0.34.0/go.mod h1:t/skRecS73Piv+J+FmWIQA2N2/rDjdYSQzEE67LUUs8= -k8s.io/client-go v0.34.0 h1:YoWv5r7bsBfb0Hs2jh8SOvFbKzzxyNo0nSb0zC19KZo= -k8s.io/client-go v0.34.0/go.mod h1:ozgMnEKXkRjeMvBZdV1AijMHLTh3pbACPvK7zFR+QQY= -k8s.io/code-generator v0.34.0 h1:Ze2i1QsvUprIlX3oHiGv09BFQRLCz+StA8qKwwFzees= -k8s.io/code-generator v0.34.0/go.mod h1:Py2+4w2HXItL8CGhks8uI/wS3Y93wPKO/9mBQUYNua0= -k8s.io/component-base v0.34.0 h1:bS8Ua3zlJzapklsB1dZgjEJuJEeHjj8yTu1gxE2zQX8= -k8s.io/component-base v0.34.0/go.mod h1:RSCqUdvIjjrEm81epPcjQ/DS+49fADvGSCkIP3IC6vg= +k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM= +k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk= +k8s.io/apiextensions-apiserver v0.34.1 h1:NNPBva8FNAPt1iSVwIE0FsdrVriRXMsaWFMqJbII2CI= +k8s.io/apiextensions-apiserver v0.34.1/go.mod h1:hP9Rld3zF5Ay2Of3BeEpLAToP+l4s5UlxiHfqRaRcMc= +k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4= +k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/apiserver v0.34.1 h1:U3JBGdgANK3dfFcyknWde1G6X1F4bg7PXuvlqt8lITA= +k8s.io/apiserver v0.34.1/go.mod h1:eOOc9nrVqlBI1AFCvVzsob0OxtPZUCPiUJL45JOTBG0= +k8s.io/cli-runtime v0.34.1 h1:btlgAgTrYd4sk8vJTRG6zVtqBKt9ZMDeQZo2PIzbL7M= +k8s.io/cli-runtime v0.34.1/go.mod h1:aVA65c+f0MZiMUPbseU/M9l1Wo2byeaGwUuQEQVVveE= +k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY= +k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8= +k8s.io/code-generator v0.34.1 h1:WpphT26E+j7tEgIUfFr5WfbJrktCGzB3JoJH9149xYc= +k8s.io/code-generator v0.34.1/go.mod h1:DeWjekbDnJWRwpw3s0Jat87c+e0TgkxoR4ar608yqvg= +k8s.io/component-base v0.34.1 h1:v7xFgG+ONhytZNFpIz5/kecwD+sUhVE6HU7qQUiRM4A= +k8s.io/component-base v0.34.1/go.mod h1:mknCpLlTSKHzAQJJnnHVKqjxR7gBeHRv0rPXA7gdtQ0= k8s.io/endpointslice v0.34.0 h1:VdOz9ofkp74+SsuxEO7CEpcvsuCe7zAx0bDJdDU0114= k8s.io/endpointslice v0.34.0/go.mod h1:aUArEJwcmRHkFG91fXsMmJXlGzlsRNfWsWNlaq6Rhqo= k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f h1:SLb+kxmzfA87x4E4brQzB33VBbT2+x7Zq9ROIHmGn9Q= @@ -1066,8 +1066,10 @@ k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= -k8s.io/kubectl v0.34.0 h1:NcXz4TPTaUwhiX4LU+6r6udrlm0NsVnSkP3R9t0dmxs= -k8s.io/kubectl v0.34.0/go.mod h1:bmd0W5i+HuG7/p5sqicr0Li0rR2iIhXL0oUyLF3OjR4= +k8s.io/kubectl v0.34.1 h1:1qP1oqT5Xc93K+H8J7ecpBjaz511gan89KO9Vbsh/OI= +k8s.io/kubectl v0.34.1/go.mod h1:JRYlhJpGPyk3dEmJ+BuBiOB9/dAvnrALJEiY/C5qa6A= +k8s.io/metrics v0.34.1 h1:374Rexmp1xxgRt64Bi0TsjAM8cA/Y8skwCoPdjtIslE= +k8s.io/metrics v0.34.1/go.mod h1:Drf5kPfk2NJrlpcNdSiAAHn/7Y9KqxpRNagByM7Ei80= k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d h1:wAhiDyZ4Tdtt7e46e9M5ZSAJ/MnPGPs+Ki1gHw4w1R0= k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= @@ -1087,8 +1089,8 @@ sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I sigs.k8s.io/kustomize/api v0.20.1/go.mod h1:t6hUFxO+Ph0VxIk1sKp1WS0dOjbPCtLJ4p8aADLwqjM= sigs.k8s.io/kustomize/kyaml v0.20.1 h1:PCMnA2mrVbRP3NIB6v9kYCAc38uvFLVs8j/CD567A78= 
sigs.k8s.io/kustomize/kyaml v0.20.1/go.mod h1:0EmkQHRUsJxY8Ug9Niig1pUMSCGHxQ5RklbpV/Ri6po= -sigs.k8s.io/mcs-api v0.1.1-0.20250610011024-38bab5ba476b h1:Yw4HHwvbgioWqvyUauZM4pvf2QrMNM38ILbKV46Niio= -sigs.k8s.io/mcs-api v0.1.1-0.20250610011024-38bab5ba476b/go.mod h1:zZ5CK8uS6HaLkxY4HqsmcBHfzHuNMrY2uJy8T7jffK4= +sigs.k8s.io/mcs-api v0.3.1-0.20250908090929-79efdd37ed2b h1:w9u8OrCeb+WjmF/6KHWtKGj6nEGF8XUxlghztKW87TQ= +sigs.k8s.io/mcs-api v0.3.1-0.20250908090929-79efdd37ed2b/go.mod h1:zZ5CK8uS6HaLkxY4HqsmcBHfzHuNMrY2uJy8T7jffK4= sigs.k8s.io/mcs-api/controllers v0.0.0-20250731081715-a807ec696257 h1:xxfmazcj83RrfIPC5nAjBpe/0/hI/m+/GaGV+ekJHGQ= sigs.k8s.io/mcs-api/controllers v0.0.0-20250731081715-a807ec696257/go.mod h1:58tAw+r41eczKabKA5KwwLabGlO6YLbtkYtKif9sDr0= sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= diff --git a/hubble/Makefile b/hubble/Makefile index 4e002741637ac..45cd088066467 100644 --- a/hubble/Makefile +++ b/hubble/Makefile @@ -1,40 +1,40 @@ # SPDX-License-Identifier: Apache-2.0 # Copyright Authors of Hubble +.DEFAULT_GOAL := all + CURR_DIR := $(shell dirname "$(realpath $(lastword $(MAKEFILE_LIST)))") include $(CURR_DIR)/../Makefile.defs # Add the ability to override variables --include Makefile.override +-include $(CURR_DIR)/Makefile.override TARGET := hubble +.PHONY: all all: $(TARGET) -.PHONY: all $(TARGET) release local-release clean - - SUBDIRS_HUBBLE_CLI := . TARGET_DIR=. # homebrew uses the github release's tarball of the source that does not contain the '.git' directory. GIT_BRANCH = $(shell command -v git >/dev/null 2>&1 && git rev-parse --abbrev-ref HEAD 2> /dev/null) GIT_HASH = $(shell command -v git >/dev/null 2>&1 && git rev-parse --short HEAD 2> /dev/null) -GOOS ?= -GOARCH ?= - GO_BUILD_LDFLAGS += -X "github.com/cilium/cilium/hubble/pkg.GitBranch=$(GIT_BRANCH)" \ -X "github.com/cilium/cilium/hubble/pkg.GitHash=$(GIT_HASH)" \ -X "github.com/cilium/cilium/hubble/pkg.Version=v$(VERSION)" +.PHONY: $(TARGET) $(TARGET): - GOOS=$(GOOS) GOARCH=$(GOARCH) $(GO_BUILD) -o $(TARGET_DIR)/$(@)$(EXT) $(SUBDIRS_HUBBLE_CLI) + $(GO_BUILD) -o $(TARGET_DIR)/$(@)$(EXT) $(SUBDIRS_HUBBLE_CLI) +.PHONY: release release: cd $(CURR_DIR)/../ && \ $(CONTAINER_ENGINE) run --rm --workdir /cilium --volume `pwd`:/cilium --user "$(shell id -u):$(shell id -g)" \ $(CILIUM_BUILDER_IMAGE) sh -c "$(MAKE) -C $(TARGET) local-release" +.PHONY: local-release local-release: clean set -o errexit; \ for OS in darwin linux windows; do \ @@ -62,5 +62,22 @@ local-release: clean rm -r release/$$OS; \ done; +.PHONY: install +install: install-binary install-bash-completion + +.PHONY: install-binary +install-binary: $(TARGET) + $(QUIET)$(INSTALL) -m 0755 -d $(DESTDIR)$(BINDIR) + $(QUIET)$(INSTALL) -m 0755 $(TARGET) $(DESTDIR)$(BINDIR) + +.PHONY: install-bash-completion +install-bash-completion: $(TARGET) + $(QUIET)$(INSTALL) -m 0755 -d $(DESTDIR)$(CONFDIR)/bash_completion.d + ./$(TARGET) completion bash > $(TARGET)_bash_completion + $(QUIET)$(INSTALL) -m 0644 -T $(TARGET)_bash_completion $(DESTDIR)$(CONFDIR)/bash_completion.d/$(TARGET) + +.PHONY: clean clean: - rm -f $(TARGET) + @$(ECHO_CLEAN) + -$(QUIET)rm -f $(TARGET) + $(QUIET)$(GO_CLEAN) diff --git a/hubble/cmd/cli_test.go b/hubble/cmd/cli_test.go index 218da5678c196..f233dcda23528 100644 --- a/hubble/cmd/cli_test.go +++ b/hubble/cmd/cli_test.go @@ -8,6 +8,7 @@ import ( "context" _ "embed" "fmt" + "strings" "testing" "github.com/spf13/viper" @@ -41,6 +42,12 @@ denylist: - '{"source_ip":["1.1.1.1"]}' ` +func normalizeNewlines(content 
string) string { + content = strings.ReplaceAll(content, "\r\n", "\n") + content = strings.ReplaceAll(content, "\r", "\n") + return content +} + func TestTestHubbleObserve(t *testing.T) { tests := []struct { name string @@ -116,7 +123,7 @@ Use "hubble [command] --help" for more information about a command. cli.SetArgs(tt.args) err := cli.Execute() require.Equal(t, tt.expectErr, err) - output := b.String() + output := normalizeNewlines(b.String()) if tt.expectedOutput != "" { assert.Equal(t, tt.expectedOutput, output, "expected output does not match") } diff --git a/hubble/cmd/observe/flows.go b/hubble/cmd/observe/flows.go index 7eb4bd5a703ef..65194002e725f 100644 --- a/hubble/cmd/observe/flows.go +++ b/hubble/cmd/observe/flows.go @@ -420,6 +420,10 @@ func newFlowsCmdHelper(usage cmdUsage, vp *viper.Viper, ofilter *flowFilter) *co "trace-id", ofilter, "Show only flows which match this trace ID")) + filterFlags.Var(filterVar( + "ip-trace-id", ofilter, + "Show only flows which match this IP trace ID")) + filterFlags.Var(filterVar( "from-fqdn", ofilter, `Show all flows originating at the given fully qualified domain name (e.g. "*.cilium.io").`)) diff --git a/hubble/cmd/observe/flows_filter.go b/hubble/cmd/observe/flows_filter.go index f9f0bb2d6a1af..da3dfcd22fb21 100644 --- a/hubble/cmd/observe/flows_filter.go +++ b/hubble/cmd/observe/flows_filter.go @@ -514,6 +514,18 @@ func (of *flowFilter) set(f *filterTracker, name, val string, track bool) error f.TraceId = append(f.GetTraceId(), val) }) + case "ip-trace-id": + if val == "0" { + return fmt.Errorf("invalid --ip-trace-id value; must be greater than 0") + } + traceID, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return fmt.Errorf("invalid --ip-trace-id value: %w", err) + } + f.apply(func(f *flowpb.FlowFilter) { + f.IpTraceId = append(f.GetIpTraceId(), traceID) + }) + case "verdict": if wipe { f.apply(func(f *flowpb.FlowFilter) { diff --git a/hubble/cmd/observe/flows_filter_test.go b/hubble/cmd/observe/flows_filter_test.go index 73b0d09e6711d..31c90f9cf3d8d 100644 --- a/hubble/cmd/observe/flows_filter_test.go +++ b/hubble/cmd/observe/flows_filter_test.go @@ -1129,6 +1129,55 @@ func TestCluster(t *testing.T) { } } +func TestIpTraceId(t *testing.T) { + tt := []struct { + name string + flags []string + filters []*flowpb.FlowFilter + err string + }{ + { + name: "error", + flags: []string{"--ip-trace-id", "0"}, + filters: []*flowpb.FlowFilter{}, + err: "invalid --ip-trace-id value; must be greater than 0", + }, + { + name: "single", + flags: []string{"--ip-trace-id", "1"}, + filters: []*flowpb.FlowFilter{ + {IpTraceId: []uint64{1}}, + }, + }, + { + name: "multiple", + flags: []string{"--ip-trace-id", "1", "--ip-trace-id", "2"}, + filters: []*flowpb.FlowFilter{ + {IpTraceId: []uint64{1, 2}}, + }, + }, + } + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + f := newFlowFilter() + cmd := newFlowsCmdWithFilter(viper.New(), f) + err := cmd.Flags().Parse(tc.flags) + if tc.err != "" { + require.Errorf(t, err, tc.err) + return + } else { + require.NoError(t, err) + } + assert.Nil(t, f.blacklist) + got := f.whitelist.flowFilters() + diff := cmp.Diff(tc.filters, got, cmpopts.IgnoreUnexported(flowpb.FlowFilter{})) + if diff != "" { + t.Errorf("mismatch (-want +got):\n%s", diff) + } + }) + } +} + func TestCELExpression(t *testing.T) { tt := []struct { name string diff --git a/hubble/cmd/observe_help.txt b/hubble/cmd/observe_help.txt index bd78b7ae4d497..7b3fb82164238 100644 --- a/hubble/cmd/observe_help.txt +++ 
b/hubble/cmd/observe_help.txt @@ -62,6 +62,7 @@ Filters Flags: --identity filter Show all flows related to an endpoint with the given security identity --interface filter Show all flows observed at the given interface name (e.g. eth0) --ip filter Show all flows originating or terminating at the given IP address. Each of the IPs can be specified as an exact match (e.g. '1.1.1.1') or as a CIDR range (e.g.'1.1.1.0/24'). + --ip-trace-id filter Show only flows which match this IP trace ID --ip-version filter Show only IPv4, IPv6 flows or non IP flows (e.g. ARP packets) (ie: "none", "v4", "v6") -4, --ipv4 filter[=v4] Show only IPv4 flows -6, --ipv6 filter[=v6] Show only IPv6 flows @@ -156,15 +157,15 @@ Server Flags: --request-timeout duration Unary Request timeout. Only applies to non-streaming RPCs (ServerStatus, ListNodes, ListNamespaces). (default 12s) --server string Address of a Hubble server. Ignored when --input-file or --port-forward is provided. (default "localhost:4245") --timeout duration Hubble server dialing timeout (default 5s) - --tls Specify that TLS must be used when establishing a connection to a Hubble server. + --tls Specify that TLS must be used when establishing a connection to a Hubble server. By default, TLS is only enabled if the server address starts with 'tls://'. - --tls-allow-insecure Allows the client to skip verifying the server's certificate chain and host name. - This option is NOT recommended as, in this mode, TLS is susceptible to machine-in-the-middle attacks. + --tls-allow-insecure Allows the client to skip verifying the server's certificate chain and host name. + This option is NOT recommended as, in this mode, TLS is susceptible to machine-in-the-middle attacks. See also the 'tls-server-name' option which allows setting the server name. --tls-ca-cert-files strings Paths to custom Certificate Authority (CA) certificate files.The files must contain PEM encoded data. - --tls-client-cert-file string Path to the public key file for the client certificate to connect to a Hubble server (implies TLS). + --tls-client-cert-file string Path to the public key file for the client certificate to connect to a Hubble server (implies TLS). The file must contain PEM encoded data. - --tls-client-key-file string Path to the private key file for the client certificate to connect a Hubble server (implies TLS). + --tls-client-key-file string Path to the private key file for the client certificate to connect a Hubble server (implies TLS). The file must contain PEM encoded data. --tls-server-name string Specify a server name to verify the hostname on the returned certificate (eg: 'instance.hubble-relay.cilium.io'). 
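As an illustrative sketch of the new --ip-trace-id filter added in the hunks above (not part of the patch itself): the flag is repeatable, and each value must be a positive 64-bit integer — 0 is rejected by the validation in flows_filter.go. The invocation below assumes a Hubble server reachable at the default localhost:4245 and uses purely hypothetical trace IDs.

    # show only flows carrying IP trace ID 1234 or 5678
    hubble observe --ip-trace-id 1234 --ip-trace-id 5678
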
diff --git a/hubble/pkg/defaults/defaults.go b/hubble/pkg/defaults/defaults.go index e07685e4bbdc5..402115762a8e6 100644 --- a/hubble/pkg/defaults/defaults.go +++ b/hubble/pkg/defaults/defaults.go @@ -65,6 +65,7 @@ var ( "destination_service", "is_reply", "Summary", + "ip_trace_id", } ) diff --git a/hubble/pkg/printer/printer.go b/hubble/pkg/printer/printer.go index bee0abf5cc76b..4a611e38d9db1 100644 --- a/hubble/pkg/printer/printer.go +++ b/hubble/pkg/printer/printer.go @@ -280,12 +280,17 @@ func formatPolicyNames(policies []*flowpb.Policy) string { } func (p Printer) getSummary(f *flowpb.Flow) string { - auth := p.getAuth(f) - if auth == "" { - return f.GetSummary() + b := strings.Builder{} + if f.IpTraceId != nil && f.IpTraceId.TraceId > 0 { + b.WriteString(fmt.Sprintf("IP Trace ID: %d; ", f.IpTraceId.TraceId)) } + b.WriteString(f.GetSummary()) - return fmt.Sprintf("%s; Auth: %s", f.GetSummary(), auth) + if auth := p.getAuth(f); auth != "" { + b.WriteString("; Auth: " + auth) + } + + return b.String() } func (p Printer) getAuth(f *flowpb.Flow) string { @@ -943,6 +948,7 @@ func (p *Printer) WriteLostEvent(res *observerpb.GetFlowsResponse) error { src := f.GetSource() numEventsLost := f.GetNumEventsLost() cpu := f.GetCpu() + first, last := f.GetFirst(), f.GetLast() if p.line == 0 { w.print("TIMESTAMP", tab) @@ -957,16 +963,24 @@ func (p *Printer) WriteLostEvent(res *observerpb.GetFlowsResponse) error { "SUMMARY", newline, ) } - w.print("", tab) + w.print(fmtTimestamp(p.opts.timeFormat, res.GetTime()), tab) if p.opts.nodeName { - w.print("", tab) + nodeName := res.GetNodeName() + if nodeName == "" { + nodeName = "-" + } + w.print(nodeName, tab) + } + summary := fmt.Sprintf("CPU(%d) - %d", cpu.GetValue(), numEventsLost) + if first != nil && last != nil { + summary += fmt.Sprintf(" (first: %s, last: %s)", fmtTimestamp(p.opts.timeFormat, first), fmtTimestamp(p.opts.timeFormat, last)) } w.print( src, tab, - "", tab, + "-", tab, "EVENTS LOST", tab, - "", tab, - fmt.Sprintf("CPU(%d) - %d", cpu.GetValue(), numEventsLost), newline, + "-", tab, + summary, newline, ) if w.err != nil { return fmt.Errorf("failed to write out packet: %w", w.err) @@ -976,6 +990,7 @@ func (p *Printer) WriteLostEvent(res *observerpb.GetFlowsResponse) error { src := f.GetSource() numEventsLost := f.GetNumEventsLost() cpu := f.GetCpu() + first, last := f.GetFirst(), f.GetLast() if p.line != 0 { // TODO: line length? w.print(dictSeparator, newline) @@ -983,15 +998,23 @@ func (p *Printer) WriteLostEvent(res *observerpb.GetFlowsResponse) error { // this is a little crude, but will do for now. 
should probably find the // longest header and auto-format the keys - w.print(" TIMESTAMP: ", "", newline) + w.print(" TIMESTAMP: ", fmtTimestamp(p.opts.timeFormat, res.GetTime()), newline) if p.opts.nodeName { - w.print(" NODE: ", "", newline) + nodeName := res.GetNodeName() + if nodeName == "" { + nodeName = "-" + } + w.print(" NODE: ", nodeName, newline) + } + summary := fmt.Sprintf("CPU(%d) - %d", cpu.GetValue(), numEventsLost) + if first != nil && last != nil { + summary += fmt.Sprintf(" (first: %s, last: %s)", fmtTimestamp(p.opts.timeFormat, first), fmtTimestamp(p.opts.timeFormat, last)) } w.print( " SOURCE: ", src, newline, " TYPE: ", "EVENTS LOST", newline, - " VERDICT: ", "", newline, - " SUMMARY: ", fmt.Sprintf("CPU(%d) - %d", cpu.GetValue(), numEventsLost), newline, + " VERDICT: ", "-", newline, + " SUMMARY: ", summary, newline, ) if w.err != nil { return fmt.Errorf("failed to write out packet: %w", w.err) @@ -1001,11 +1024,16 @@ func (p *Printer) WriteLostEvent(res *observerpb.GetFlowsResponse) error { src := f.GetSource() numEventsLost := f.GetNumEventsLost() cpu := f.GetCpu() + first, last := f.GetFirst(), f.GetLast() - w.printf("EVENTS LOST: %s CPU(%d) %d\n", + summary := fmt.Sprintf("CPU(%d) %d", cpu.GetValue(), numEventsLost) + if first != nil && last != nil { + summary += fmt.Sprintf(" (first: %s, last: %s)", fmtTimestamp(p.opts.timeFormat, first), fmtTimestamp(p.opts.timeFormat, last)) + } + w.printf("%s EVENTS LOST: %s %s\n", + fmtTimestamp(p.opts.timeFormat, res.GetTime()), src, - cpu.GetValue(), - numEventsLost, + summary, ) if w.err != nil { return fmt.Errorf("failed to write out packet: %w", w.err) diff --git a/hubble/pkg/printer/printer_test.go b/hubble/pkg/printer/printer_test.go index 1bd83d5827fe4..de14d037dcdbf 100644 --- a/hubble/pkg/printer/printer_test.go +++ b/hubble/pkg/printer/printer_test.go @@ -155,7 +155,8 @@ func TestPrinter_WriteProtoFlow(t *testing.T) { policyAllowed.IngressAllowedBy = []*flowpb.Policy{{Name: "my-policy", Namespace: "my-policy-namespace", Kind: "CiliumNetworkPolicy"}, {Name: "my-policy-2", Kind: "CiliumClusterwideNetworkPolicy"}} type args struct { - f *flowpb.Flow + f *flowpb.Flow + merge *flowpb.Flow } tests := []struct { name string @@ -235,6 +236,26 @@ Jan 1 00:20:34.567 1.1.1.1:31793 2.2.2.2:8080 kafka-request DROPPED K "1.1.1.1:31793 (health) -> 2.2.2.2:8080 (ID:12345) " + "Policy denied DROPPED (TCP Flags: SYN)\n", }, + { + name: "compact-with-trace-id", + options: []Option{ + Compact(), + WithColor("never"), + WithNodeName(), + Writer(&buf), + }, + args: args{ + f: &f, + merge: &flowpb.Flow{ + IpTraceId: &flowpb.IPTraceID{TraceId: 1234}, + }, + }, + wantErr: false, + expected: "Jan 1 00:20:34.567 [k8s1]: " + + "1.1.1.1:31793 (health) -> 2.2.2.2:8080 (ID:12345) " + + "Policy denied DROPPED " + + "(IP Trace ID: 1234; TCP Flags: SYN)\n", + }, { name: "compact-reply", options: []Option{ @@ -408,6 +429,33 @@ Jan 1 00:20:34.567 1.1.1.1:31793 2.2.2.2:8080 kafka-request DROPPED K `"event_type":{"type":1,"sub_type":133},` + `"is_reply":false,"Summary":"Kafka request 1234 correlation id 1 topic 'my-topic[^[30mblack[^[0m[^\\r'"}}`, }, + { + name: "jsonpb_with_trace", + options: []Option{ + JSONPB(), + WithColor("never"), + Writer(&buf), + }, + args: args{ + f: &f, + merge: &flowpb.Flow{ + IpTraceId: &flowpb.IPTraceID{ + IpOptionType: 136, + TraceId: 1234, + }, + }, + }, + wantErr: false, + expected: `{"flow":{"time":"1970-01-01T00:20:34.567800Z",` + + `"verdict":"DROPPED",` + + `"IP":{"source":"1.1.1.1","destination":"2.2.2.2"},` + + 
`"l4":{"TCP":{"source_port":31793,"destination_port":8080}},` + + `"source":{"identity":4},"destination":{"identity":12345},` + + `"Type":"L3_L4","node_name":"k8s1",` + + `"event_type":{"type":1,"sub_type":133},` + + `"ip_trace_id":{"trace_id":"1234","ip_option_type":136},` + + `"is_reply":false,"Summary":"TCP Flags: SYN"}}`, + }, { name: "dict", options: []Option{ @@ -487,9 +535,12 @@ DESTINATION: 2.2.2.2:8080 for _, tt := range tests { buf.Reset() t.Run(tt.name, func(t *testing.T) { + f := proto.Clone(tt.args.f).(*flowpb.Flow) + proto.Merge(f, tt.args.merge) + p := New(tt.options...) res := &observerpb.GetFlowsResponse{ - ResponseTypes: &observerpb.GetFlowsResponse_Flow{Flow: tt.args.f}, + ResponseTypes: &observerpb.GetFlowsResponse_Flow{Flow: f}, } // writes a node status event into the error stream if err := p.WriteProtoFlow(res); (err != nil) != tt.wantErr { @@ -1607,6 +1658,24 @@ NUM CONNECTED NODES: N/A func TestPrinter_WriteLostEventsResponse(t *testing.T) { buf := bytes.Buffer{} gfr := &observerpb.GetFlowsResponse{ + Time: ×tamppb.Timestamp{ + Seconds: 1234, + Nanos: 567800000, + }, + ResponseTypes: &observerpb.GetFlowsResponse_LostEvents{ + LostEvents: &observerpb.LostEvent{ + Source: observerpb.LostEventSource_HUBBLE_RING_BUFFER, + NumEventsLost: 1, + Cpu: wrapperspb.Int32(5), + }, + }, + } + gfrWithNode := &observerpb.GetFlowsResponse{ + Time: ×tamppb.Timestamp{ + Seconds: 1234, + Nanos: 567800000, + }, + NodeName: "node-name", ResponseTypes: &observerpb.GetFlowsResponse_LostEvents{ LostEvents: &observerpb.LostEvent{ Source: observerpb.LostEventSource_HUBBLE_RING_BUFFER, @@ -1615,6 +1684,27 @@ func TestPrinter_WriteLostEventsResponse(t *testing.T) { }, }, } + gfrWithTimestamps := &observerpb.GetFlowsResponse{ + Time: ×tamppb.Timestamp{ + Seconds: 1234, + Nanos: 567800000, + }, + ResponseTypes: &observerpb.GetFlowsResponse_LostEvents{ + LostEvents: &observerpb.LostEvent{ + Source: observerpb.LostEventSource_HUBBLE_RING_BUFFER, + NumEventsLost: 1, + Cpu: wrapperspb.Int32(5), + First: ×tamppb.Timestamp{ + Seconds: 1230, + Nanos: 567800000, + }, + Last: ×tamppb.Timestamp{ + Seconds: 1238, + Nanos: 567800000, + }, + }, + }, + } type args struct { le *observerpb.GetFlowsResponse } @@ -1622,7 +1712,6 @@ func TestPrinter_WriteLostEventsResponse(t *testing.T) { name string options []Option args args - wantErr bool expected string }{ { @@ -1631,23 +1720,23 @@ func TestPrinter_WriteLostEventsResponse(t *testing.T) { WithColor("never"), Writer(&buf), }, - args: args{gfr}, - wantErr: false, + args: args{gfr}, expected: ` -TIMESTAMP SOURCE DESTINATION TYPE VERDICT SUMMARY - HUBBLE_RING_BUFFER EVENTS LOST CPU(5) - 1`, - }, { +TIMESTAMP SOURCE DESTINATION TYPE VERDICT SUMMARY +Jan 1 00:20:34.567 HUBBLE_RING_BUFFER - EVENTS LOST - CPU(5) - 1`, + }, + { name: "compact", options: []Option{ Compact(), WithColor("never"), Writer(&buf), }, - args: args{gfr}, - wantErr: false, + args: args{gfr}, expected: ` -EVENTS LOST: HUBBLE_RING_BUFFER CPU(5) 1`, - }, { +Jan 1 00:20:34.567 EVENTS LOST: HUBBLE_RING_BUFFER CPU(5) 1`, + }, + { name: "json", options: []Option{ JSONPB(), @@ -1655,9 +1744,9 @@ EVENTS LOST: HUBBLE_RING_BUFFER CPU(5) 1`, Writer(&buf), }, args: args{gfr}, - wantErr: false, - expected: `{"lost_events":{"source":"HUBBLE_RING_BUFFER","num_events_lost":"1","cpu":5}}`, - }, { + expected: `{"lost_events":{"source":"HUBBLE_RING_BUFFER","num_events_lost":"1","cpu":5},"time":"1970-01-01T00:20:34.567800Z"}`, + }, + { name: "jsonpb", options: []Option{ JSONPB(), @@ -1665,32 +1754,152 @@ EVENTS LOST: 
HUBBLE_RING_BUFFER CPU(5) 1`, Writer(&buf), }, args: args{gfr}, - wantErr: false, - expected: `{"lost_events":{"source":"HUBBLE_RING_BUFFER","num_events_lost":"1","cpu":5}}`, - }, { + expected: `{"lost_events":{"source":"HUBBLE_RING_BUFFER","num_events_lost":"1","cpu":5},"time":"1970-01-01T00:20:34.567800Z"}`, + }, + { name: "dict", options: []Option{ Dict(), WithColor("never"), Writer(&buf), }, - args: args{gfr}, - wantErr: false, + args: args{gfr}, expected: ` - TIMESTAMP: + TIMESTAMP: Jan 1 00:20:34.567 SOURCE: HUBBLE_RING_BUFFER TYPE: EVENTS LOST - VERDICT: + VERDICT: - SUMMARY: CPU(5) - 1`, }, + // with node name + { + name: "tabular with node", + options: []Option{ + WithColor("never"), + WithNodeName(), + Writer(&buf), + }, + args: args{gfrWithNode}, + expected: ` +TIMESTAMP NODE SOURCE DESTINATION TYPE VERDICT SUMMARY +Jan 1 00:20:34.567 node-name HUBBLE_RING_BUFFER - EVENTS LOST - CPU(5) - 1`, + }, + { + name: "compact with node", + options: []Option{ + Compact(), + WithColor("never"), + WithNodeName(), + Writer(&buf), + }, + args: args{gfrWithNode}, + expected: ` +Jan 1 00:20:34.567 EVENTS LOST: HUBBLE_RING_BUFFER CPU(5) 1`, + }, + { + name: "json with node", + options: []Option{ + JSONPB(), + WithColor("never"), + WithNodeName(), + Writer(&buf), + }, + args: args{gfrWithNode}, + expected: `{"lost_events":{"source":"HUBBLE_RING_BUFFER","num_events_lost":"1","cpu":5},"node_name":"node-name","time":"1970-01-01T00:20:34.567800Z"}`, + }, + { + name: "jsonpb with node", + options: []Option{ + JSONPB(), + WithColor("never"), + WithNodeName(), + Writer(&buf), + }, + args: args{gfrWithNode}, + expected: `{"lost_events":{"source":"HUBBLE_RING_BUFFER","num_events_lost":"1","cpu":5},"node_name":"node-name","time":"1970-01-01T00:20:34.567800Z"}`, + }, + { + name: "dict with node", + options: []Option{ + Dict(), + WithColor("never"), + WithNodeName(), + Writer(&buf), + }, + args: args{gfrWithNode}, + expected: ` + TIMESTAMP: Jan 1 00:20:34.567 + NODE: node-name + SOURCE: HUBBLE_RING_BUFFER + TYPE: EVENTS LOST + VERDICT: - + SUMMARY: CPU(5) - 1`, + }, + // with lost event timestamps + { + name: "tabular with timestamps", + options: []Option{ + WithColor("never"), + Writer(&buf), + }, + args: args{gfrWithTimestamps}, + expected: ` +TIMESTAMP SOURCE DESTINATION TYPE VERDICT SUMMARY +Jan 1 00:20:34.567 HUBBLE_RING_BUFFER - EVENTS LOST - CPU(5) - 1 (first: Jan 1 00:20:30.567, last: Jan 1 00:20:38.567)`, + }, + { + name: "compact with timestamps", + options: []Option{ + Compact(), + WithColor("never"), + Writer(&buf), + }, + args: args{gfrWithTimestamps}, + expected: ` +Jan 1 00:20:34.567 EVENTS LOST: HUBBLE_RING_BUFFER CPU(5) 1 (first: Jan 1 00:20:30.567, last: Jan 1 00:20:38.567)`, + }, + { + name: "json with timestamps", + options: []Option{ + JSONPB(), + WithColor("never"), + Writer(&buf), + }, + args: args{gfrWithTimestamps}, + expected: `{"lost_events":{"source":"HUBBLE_RING_BUFFER","num_events_lost":"1","cpu":5,"first":"1970-01-01T00:20:30.567800Z","last":"1970-01-01T00:20:38.567800Z"},"time":"1970-01-01T00:20:34.567800Z"}`, + }, + { + name: "jsonpb with timestamps", + options: []Option{ + JSONPB(), + WithColor("never"), + Writer(&buf), + }, + args: args{gfrWithTimestamps}, + expected: `{"lost_events":{"source":"HUBBLE_RING_BUFFER","num_events_lost":"1","cpu":5,"first":"1970-01-01T00:20:30.567800Z","last":"1970-01-01T00:20:38.567800Z"},"time":"1970-01-01T00:20:34.567800Z"}`, + }, + { + name: "dict with timestamps", + options: []Option{ + Dict(), + WithColor("never"), + Writer(&buf), + }, + 
args: args{gfrWithTimestamps}, + expected: ` + TIMESTAMP: Jan 1 00:20:34.567 + SOURCE: HUBBLE_RING_BUFFER + TYPE: EVENTS LOST + VERDICT: - + SUMMARY: CPU(5) - 1 (first: Jan 1 00:20:30.567, last: Jan 1 00:20:38.567)`, + }, } for _, tt := range tests { buf.Reset() t.Run(tt.name, func(t *testing.T) { p := New(tt.options...) - if err := p.WriteLostEvent(tt.args.le); (err != nil) != tt.wantErr { - t.Errorf("WriteServerStatusResponse() error = %v, wantErr %v", err, tt.wantErr) - } + err := p.WriteLostEvent(tt.args.le) + require.NoError(t, err) require.NoError(t, p.Close()) require.Equal(t, strings.TrimSpace(tt.expected), strings.TrimSpace(buf.String())) }) diff --git a/images/builder/Dockerfile b/images/builder/Dockerfile index 1935fb5aff277..09f7415138b8e 100644 --- a/images/builder/Dockerfile +++ b/images/builder/Dockerfile @@ -4,9 +4,9 @@ # SPDX-License-Identifier: Apache-2.0 ARG COMPILERS_IMAGE=quay.io/cilium/image-compilers:1755973935-c5bf38b@sha256:448b126455a301c48d4189bfa9335f44d7d4524d69dcbc8151013ecb6ac0f042 -ARG CILIUM_RUNTIME_IMAGE=quay.io/cilium/cilium-runtime:22e31ca0018cad492dcb99bc08368dc11fac28de@sha256:ae2f5413d4cbea8bfa2c6d05f499b5a6a3512bddf1844bb059e1a8b57b1137de +ARG CILIUM_RUNTIME_IMAGE=quay.io/cilium/cilium-runtime:061e77638353be08d1a792ef38b8f4597344139d@sha256:db098a1d65275c3d3f8090706feaab9a30c437fb24942b25ffdea06a010c5bae ARG TESTER_IMAGE=quay.io/cilium/image-tester:1755531540-60ee83e@sha256:1b05459281d6613acb8573b2cd7f488a0d224eebf031770babbfc6a294501cf7 -ARG GOLANG_IMAGE=docker.io/library/golang:1.25.1@sha256:a5e935dbd8bc3a5ea24388e376388c9a69b40628b6788a81658a801abbec8f2e +ARG GOLANG_IMAGE=docker.io/library/golang:1.25.1@sha256:8305f5fa8ea63c7b5bc85bd223ccc62941f852318ebfbd22f53bbd0b358c07e1 ARG CILIUM_LLVM_IMAGE=quay.io/cilium/cilium-llvm:1755027449-dfe66ec@sha256:6a8632f1772dee0f1f287dd84f4010347c19f350347487603f16210674a3a787 FROM ${COMPILERS_IMAGE} AS compilers-image diff --git a/images/builder/install-protoc.sh b/images/builder/install-protoc.sh index 202f539bcac91..f7db2d8f4fb05 100755 --- a/images/builder/install-protoc.sh +++ b/images/builder/install-protoc.sh @@ -9,7 +9,7 @@ set -o pipefail set -o nounset # renovate: datasource=github-release-attachments depName=protocolbuffers/protobuf -protoc_version="v32.0" +protoc_version="v32.1" protoc_ersion="${protoc_version//v/}" arch=$(arch) if [[ "${arch}" == "aarch64" ]]; then diff --git a/images/cilium-docker-plugin/Dockerfile b/images/cilium-docker-plugin/Dockerfile index 3951ff7b4a479..4e156b1c7d6af 100644 --- a/images/cilium-docker-plugin/Dockerfile +++ b/images/cilium-docker-plugin/Dockerfile @@ -4,7 +4,7 @@ # SPDX-License-Identifier: Apache-2.0 ARG BASE_IMAGE=scratch -ARG GOLANG_IMAGE=docker.io/library/golang:1.25.1@sha256:a5e935dbd8bc3a5ea24388e376388c9a69b40628b6788a81658a801abbec8f2e +ARG GOLANG_IMAGE=docker.io/library/golang:1.25.1@sha256:8305f5fa8ea63c7b5bc85bd223ccc62941f852318ebfbd22f53bbd0b358c07e1 # BUILDPLATFORM is an automatic platform ARG enabled by Docker BuildKit. 
# Represents the plataform where the build is happening, do not mix with diff --git a/images/cilium/Dockerfile b/images/cilium/Dockerfile index 79542cf16a135..be1b6894deb84 100644 --- a/images/cilium/Dockerfile +++ b/images/cilium/Dockerfile @@ -3,8 +3,8 @@ # Copyright Authors of Cilium # SPDX-License-Identifier: Apache-2.0 -ARG CILIUM_BUILDER_IMAGE=quay.io/cilium/cilium-builder:dce64df3892bec24c003f97edcb8aae51640d97d@sha256:dbcda5ef242a9f5f138b733dbf22437c4134106dad6127041653f947b81e5367 -ARG CILIUM_RUNTIME_IMAGE=quay.io/cilium/cilium-runtime:22e31ca0018cad492dcb99bc08368dc11fac28de@sha256:ae2f5413d4cbea8bfa2c6d05f499b5a6a3512bddf1844bb059e1a8b57b1137de +ARG CILIUM_BUILDER_IMAGE=quay.io/cilium/cilium-builder:f5e2620e352d7bc93f5fa2e2309786102e73c014@sha256:5afb2abd71a4725003c967ee47d9555df10da1e754c07ffc48dfa8e615b83fd5 +ARG CILIUM_RUNTIME_IMAGE=quay.io/cilium/cilium-runtime:061e77638353be08d1a792ef38b8f4597344139d@sha256:db098a1d65275c3d3f8090706feaab9a30c437fb24942b25ffdea06a010c5bae # # cilium-envoy from github.com/cilium/proxy # @@ -52,11 +52,7 @@ RUN --mount=type=bind,readwrite,target=/go/src/github.com/cilium/cilium \ # bash_completion is the same for both architectures. make GOARCH=${BUILDARCH} DESTDIR=/tmp/install/${TARGETOS}/${TARGETARCH} PKG_BUILD=1 $(echo $MODIFIERS | tr -d '"') \ install-bash-completion licenses-all && \ - mv LICENSE.all /tmp/install/${TARGETOS}/${TARGETARCH}/LICENSE.all && \ - mkdir -p /tmp/hubble/${TARGETOS}/${TARGETARCH} && \ - cd hubble && \ - make GOOS=${TARGETOS} GOARCH=${TARGETARCH} $(echo $MODIFIERS | tr -d '"') && \ - mv hubble /tmp/hubble/${TARGETOS}/${TARGETARCH}/hubble + mv LICENSE.all /tmp/install/${TARGETOS}/${TARGETARCH}/LICENSE.all # Extract debug symbols to /tmp/debug and strip the binaries if NOSTRIP is not set. # Use the appropriate objcopy for the target architecture. @@ -109,8 +105,6 @@ COPY --from=cilium-envoy /usr/bin/cilium-envoy /usr/bin/cilium-envoy-starter /us # local unix domain socket instead of Hubble Relay. ENV HUBBLE_SERVER=unix:///var/run/cilium/hubble.sock COPY --from=builder /tmp/install/${TARGETOS}/${TARGETARCH} / -COPY --from=builder /tmp/hubble/${TARGETOS}/${TARGETARCH}/hubble /usr/bin/hubble -RUN /usr/bin/hubble completion bash > /etc/bash_completion.d/hubble WORKDIR /home/cilium ENV INITSYSTEM="SYSTEMD" diff --git a/images/clustermesh-apiserver/Dockerfile b/images/clustermesh-apiserver/Dockerfile index 05861982145fe..e560efc18f6e9 100644 --- a/images/clustermesh-apiserver/Dockerfile +++ b/images/clustermesh-apiserver/Dockerfile @@ -12,7 +12,7 @@ ARG BASE_IMAGE=gcr.io/distroless/static:nonroot@sha256:a9f88e0d99c1ceedbce565fad # chain attacks where an attacker has write access to our 3rd party dependency image registries. # 2. These digests must be to the *overall* digest, not the digest for a specific image. This is because the images will # be architecture specific, but the overall digest will contain all of the architectures. -ARG GOLANG_IMAGE=docker.io/library/golang:1.25.1@sha256:a5e935dbd8bc3a5ea24388e376388c9a69b40628b6788a81658a801abbec8f2e +ARG GOLANG_IMAGE=docker.io/library/golang:1.25.1@sha256:8305f5fa8ea63c7b5bc85bd223ccc62941f852318ebfbd22f53bbd0b358c07e1 # We don't use ETCD_IMAGE because that's used in Makefile.defs to select a ETCD image approrpate for the *host platform* # to run tests with. 
ARG ETCD_SERVER_IMAGE=gcr.io/etcd-development/etcd:v3.6.4@sha256:5d10878e4fd4ebfdf82bc142fb044542a3ca514c0ee169277643a84d6816892a diff --git a/images/hubble-relay/Dockerfile b/images/hubble-relay/Dockerfile index c411f6a818a32..2705b128398e4 100644 --- a/images/hubble-relay/Dockerfile +++ b/images/hubble-relay/Dockerfile @@ -6,8 +6,8 @@ # distroless images are signed by cosign and can be verified using: # $ cosign verify $IMAGE_NAME --certificate-oidc-issuer https://accounts.google.com --certificate-identity keyless@distroless.iam.gserviceaccount.com ARG BASE_IMAGE=gcr.io/distroless/static:nonroot@sha256:a9f88e0d99c1ceedbce565fad7d3f96744d15e6919c19c7dafe84a6dd9a80c61 -ARG GOLANG_IMAGE=docker.io/library/golang:1.25.1@sha256:a5e935dbd8bc3a5ea24388e376388c9a69b40628b6788a81658a801abbec8f2e -ARG CILIUM_BUILDER_IMAGE=quay.io/cilium/cilium-builder:dce64df3892bec24c003f97edcb8aae51640d97d@sha256:dbcda5ef242a9f5f138b733dbf22437c4134106dad6127041653f947b81e5367 +ARG GOLANG_IMAGE=docker.io/library/golang:1.25.1@sha256:8305f5fa8ea63c7b5bc85bd223ccc62941f852318ebfbd22f53bbd0b358c07e1 +ARG CILIUM_BUILDER_IMAGE=quay.io/cilium/cilium-builder:f5e2620e352d7bc93f5fa2e2309786102e73c014@sha256:5afb2abd71a4725003c967ee47d9555df10da1e754c07ffc48dfa8e615b83fd5 # BUILDPLATFORM is an automatic platform ARG enabled by Docker BuildKit. # Represents the plataform where the build is happening, do not mix with diff --git a/images/operator/Dockerfile b/images/operator/Dockerfile index cae64a9c891dd..7af24e3c051df 100644 --- a/images/operator/Dockerfile +++ b/images/operator/Dockerfile @@ -4,9 +4,9 @@ # SPDX-License-Identifier: Apache-2.0 ARG BASE_IMAGE=scratch -ARG GOLANG_IMAGE=docker.io/library/golang:1.25.1@sha256:a5e935dbd8bc3a5ea24388e376388c9a69b40628b6788a81658a801abbec8f2e +ARG GOLANG_IMAGE=docker.io/library/golang:1.25.1@sha256:8305f5fa8ea63c7b5bc85bd223ccc62941f852318ebfbd22f53bbd0b358c07e1 ARG ALPINE_IMAGE=docker.io/library/alpine:3.22.1@sha256:4bcff63911fcb4448bd4fdacec207030997caf25e9bea4045fa6c8c44de311d1 -ARG CILIUM_BUILDER_IMAGE=quay.io/cilium/cilium-builder:dce64df3892bec24c003f97edcb8aae51640d97d@sha256:dbcda5ef242a9f5f138b733dbf22437c4134106dad6127041653f947b81e5367 +ARG CILIUM_BUILDER_IMAGE=quay.io/cilium/cilium-builder:f5e2620e352d7bc93f5fa2e2309786102e73c014@sha256:5afb2abd71a4725003c967ee47d9555df10da1e754c07ffc48dfa8e615b83fd5 # BUILDPLATFORM is an automatic platform ARG enabled by Docker BuildKit. 
# Represents the plataform where the build is happening, do not mix with diff --git a/images/runtime/Dockerfile b/images/runtime/Dockerfile index 4ccdfcf2167f3..384fbaf5064ee 100644 --- a/images/runtime/Dockerfile +++ b/images/runtime/Dockerfile @@ -3,7 +3,7 @@ # Copyright Authors of Cilium # SPDX-License-Identifier: Apache-2.0 -ARG GOLANG_IMAGE=docker.io/library/golang:1.25.1@sha256:a5e935dbd8bc3a5ea24388e376388c9a69b40628b6788a81658a801abbec8f2e +ARG GOLANG_IMAGE=docker.io/library/golang:1.25.1@sha256:8305f5fa8ea63c7b5bc85bd223ccc62941f852318ebfbd22f53bbd0b358c07e1 ARG UBUNTU_IMAGE=docker.io/library/ubuntu:24.04@sha256:9cbed754112939e914291337b5e554b07ad7c392491dba6daf25eef1332a22e8 ARG CILIUM_LLVM_IMAGE=quay.io/cilium/cilium-llvm:1755027449-dfe66ec@sha256:6a8632f1772dee0f1f287dd84f4010347c19f350347487603f16210674a3a787 diff --git a/install/kubernetes/Makefile.values b/install/kubernetes/Makefile.values index c54167e3dd51c..1cf68c6198810 100644 --- a/install/kubernetes/Makefile.values +++ b/install/kubernetes/Makefile.values @@ -33,7 +33,7 @@ export CERTGEN_DIGEST:=sha256:de7b97b1d19a34b674d0c4bc1da4db999f04ae355923a9a994 # renovate: datasource=docker export CILIUM_NODEINIT_REPO:=quay.io/cilium/startup-script export CILIUM_NODEINIT_VERSION:=1755531540-60ee83e -export CILIUM_NODEINIT_DIGEST:=sha256:0c91245afb3a4ff78b5cc8c09226806e94a9a10eb0adb74a85e0eeed2a5cae8c +export CILIUM_NODEINIT_DIGEST:=sha256:5bdca3c2dec2c79f58d45a7a560bf1098c2126350c901379fe850b7f78d3d757 # renovate: datasource=docker export CILIUM_ENVOY_REPO:=quay.io/cilium/cilium-envoy diff --git a/install/kubernetes/cilium/README.md b/install/kubernetes/cilium/README.md index 90cbd3781061d..6e31c7a1e96b6 100644 --- a/install/kubernetes/cilium/README.md +++ b/install/kubernetes/cilium/README.md @@ -261,7 +261,7 @@ contributors across the globe, there is almost always someone available to help. | clustermesh.apiserver.service.loadBalancerClass | string | `nil` | Configure a loadBalancerClass. Allows to configure the loadBalancerClass on the clustermesh-apiserver LB service in case the Service type is set to LoadBalancer (requires Kubernetes 1.24+). | | clustermesh.apiserver.service.loadBalancerIP | string | `nil` | Configure a specific loadBalancerIP. Allows to configure a specific loadBalancerIP on the clustermesh-apiserver LB service in case the Service type is set to LoadBalancer. | | clustermesh.apiserver.service.loadBalancerSourceRanges | list | `[]` | Configure loadBalancerSourceRanges. Allows to configure the source IP ranges allowed to access the clustermesh-apiserver LB service in case the Service type is set to LoadBalancer. | -| clustermesh.apiserver.service.nodePort | int | `32379` | Optional port to use as the node port for apiserver access. WARNING: make sure to configure a different NodePort in each cluster if kube-proxy replacement is enabled, as Cilium is currently affected by a known bug (#24692) when NodePorts are handled by the KPR implementation. If a service with the same NodePort exists both in the local and the remote cluster, all traffic originating from inside the cluster and targeting the corresponding NodePort will be redirected to a local backend, regardless of whether the destination node belongs to the local or the remote cluster. | +| clustermesh.apiserver.service.nodePort | int | `32379` | Optional port to use as the node port for apiserver access. | | clustermesh.apiserver.service.type | string | `"NodePort"` | The type of service used for apiserver access. 
| | clustermesh.apiserver.terminationGracePeriodSeconds | int | `30` | terminationGracePeriodSeconds for the clustermesh-apiserver deployment | | clustermesh.apiserver.tls.admin | object | `{"cert":"","key":""}` | base64 encoded PEM values for the clustermesh-apiserver admin certificate and private key. Used if 'auto' is not enabled. | @@ -364,6 +364,7 @@ contributors across the globe, there is almost always someone available to help. | enableInternalTrafficPolicy | bool | `true` | Enable Internal Traffic Policy | | enableLBIPAM | bool | `true` | Enable LoadBalancer IP Address Management | | enableMasqueradeRouteSource | bool | `false` | Enables masquerading to the source of the route for traffic leaving the node from endpoints. | +| enableNoServiceEndpointsRoutable | bool | `true` | Enable routing to a service that has zero endpoints | | enableNonDefaultDenyPolicies | bool | `true` | Enable Non-Default-Deny policies | | enableXTSocketFallback | bool | `true` | Enables the fallback compatibility solution for when the xt_socket kernel module is missing and it is needed for the datapath L7 redirection to work properly. See documentation for details on when this can be disabled: https://docs.cilium.io/en/stable/operations/system_requirements/#linux-kernel. | | encryption.enabled | bool | `false` | Enable transparent network encryption. | @@ -786,7 +787,7 @@ contributors across the globe, there is almost always someone available to help. | nodeinit.extraEnv | list | `[]` | Additional nodeinit environment variables. | | nodeinit.extraVolumeMounts | list | `[]` | Additional nodeinit volumeMounts. | | nodeinit.extraVolumes | list | `[]` | Additional nodeinit volumes. | -| nodeinit.image | object | `{"digest":"sha256:0c91245afb3a4ff78b5cc8c09226806e94a9a10eb0adb74a85e0eeed2a5cae8c","override":null,"pullPolicy":"Always","repository":"quay.io/cilium/startup-script","tag":"1755531540-60ee83e","useDigest":true}` | node-init image. | +| nodeinit.image | object | `{"digest":"sha256:5bdca3c2dec2c79f58d45a7a560bf1098c2126350c901379fe850b7f78d3d757","override":null,"pullPolicy":"Always","repository":"quay.io/cilium/startup-script","tag":"1755531540-60ee83e","useDigest":true}` | node-init image. | | nodeinit.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for nodeinit pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector | | nodeinit.podAnnotations | object | `{}` | Annotations to be added to node-init pods. | | nodeinit.podLabels | object | `{}` | Labels to be added to node-init pods. 
| diff --git a/install/kubernetes/cilium/templates/cilium-agent/clusterrole.yaml b/install/kubernetes/cilium/templates/cilium-agent/clusterrole.yaml index 6aef1b2123ddd..119d0bd73e2a2 100644 --- a/install/kubernetes/cilium/templates/cilium-agent/clusterrole.yaml +++ b/install/kubernetes/cilium/templates/cilium-agent/clusterrole.yaml @@ -117,6 +117,7 @@ rules: - ciliumcidrgroups - ciliuml2announcementpolicies - ciliumpodippools + - ciliumvteppolicies verbs: - list - watch diff --git a/install/kubernetes/cilium/templates/cilium-configmap.yaml b/install/kubernetes/cilium/templates/cilium-configmap.yaml index edaea581bd18a..cec078b2ed7b5 100644 --- a/install/kubernetes/cilium/templates/cilium-configmap.yaml +++ b/install/kubernetes/cilium/templates/cilium-configmap.yaml @@ -33,12 +33,6 @@ {{- end }} {{- $defaultBpfCtTcpMax = 0 -}} {{- $defaultBpfCtAnyMax = 0 -}} - {{- $defaultKubeProxyReplacement = "probe" -}} -{{- end -}} - -{{- /* Default values when 1.9 was initially deployed */ -}} -{{- if semverCompare ">=1.9" (default "1.9" .Values.upgradeCompatibility) -}} - {{- $defaultKubeProxyReplacement = "probe" -}} {{- end -}} {{- /* Default values when 1.10 was initially deployed */ -}} @@ -52,7 +46,6 @@ {{- if .Values.azure.enabled }} {{- $azureUsePrimaryAddress = "false" -}} {{- end }} - {{- $defaultKubeProxyReplacement = "disabled" -}} {{- $defaultDNSProxyEnableTransparentMode = "true" -}} {{- end -}} @@ -780,6 +773,10 @@ data: kube-proxy-replacement-healthz-bind-address: {{ default "" .Values.kubeProxyReplacementHealthzBindAddr | quote}} {{- end }} +{{- if hasKey .Values "enableNoServiceEndpointsRoutable" }} + enable-no-service-endpoints-routable: {{ .Values.enableNoServiceEndpointsRoutable | quote }} +{{- end }} + {{- if $socketLB }} {{- if hasKey $socketLB "enabled" }} bpf-lb-sock: {{ $socketLB.enabled | quote }} @@ -971,6 +968,10 @@ data: # Capacity of the buffer to store recent events. hubble-event-buffer-capacity: {{ .Values.hubble.eventBufferCapacity | quote }} {{- end }} +{{- if hasKey .Values.hubble "lostEventSendInterval" }} + # Interval to send lost events from Observer server. + hubble-lost-event-send-interval: {{ include "validateDuration" .Values.hubble.lostEventSendInterval | quote }} +{{- end }} {{- if or .Values.hubble.metrics.enabled .Values.hubble.metrics.dynamic.enabled}} # Address to expose Hubble metrics (e.g. ":7070"). Metrics server will be disabled if this # field is not set. 
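The two new ConfigMap entries rendered above are driven by the Helm values .Values.enableNoServiceEndpointsRoutable and .Values.hubble.lostEventSendInterval (documented in values.yaml further down in this diff). A rough usage sketch, not part of the patch; the release name "cilium", the kube-system namespace, and the "cilium" chart repository alias are assumptions:

    # render the new keys into the cilium-configmap: disable routing to
    # endpoint-less services and send lost events every second
    helm upgrade cilium cilium/cilium --namespace kube-system --reuse-values \
      --set enableNoServiceEndpointsRoutable=false \
      --set hubble.lostEventSendInterval=1s

With these settings the rendered ConfigMap should carry enable-no-service-endpoints-routable: "false" and hubble-lost-event-send-interval: "1s".
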
diff --git a/install/kubernetes/cilium/templates/cilium-operator/clusterrole.yaml b/install/kubernetes/cilium/templates/cilium-operator/clusterrole.yaml index 4cdf386df8b4e..82c65e4019616 100644 --- a/install/kubernetes/cilium/templates/cilium-operator/clusterrole.yaml +++ b/install/kubernetes/cilium/templates/cilium-operator/clusterrole.yaml @@ -262,6 +262,7 @@ rules: - ciliuml2announcementpolicies.cilium.io - ciliumpodippools.cilium.io - ciliumgatewayclassconfigs.cilium.io + - ciliumvteppolicies.cilium.io - apiGroups: - cilium.io resources: diff --git a/install/kubernetes/cilium/templates/cilium-preflight/clusterrole.yaml b/install/kubernetes/cilium/templates/cilium-preflight/clusterrole.yaml index 9a2c06153c6ec..abfaf9c3d8986 100644 --- a/install/kubernetes/cilium/templates/cilium-preflight/clusterrole.yaml +++ b/install/kubernetes/cilium/templates/cilium-preflight/clusterrole.yaml @@ -117,6 +117,7 @@ rules: - ciliumcidrgroups - ciliuml2announcementpolicies - ciliumpodippools + - ciliumvteppolicies verbs: - list - watch diff --git a/install/kubernetes/cilium/values.schema.json b/install/kubernetes/cilium/values.schema.json index 9b3192e05661e..5d2d123ea8bc1 100644 --- a/install/kubernetes/cilium/values.schema.json +++ b/install/kubernetes/cilium/values.schema.json @@ -1834,6 +1834,9 @@ "enableMasqueradeRouteSource": { "type": "boolean" }, + "enableNoServiceEndpointsRoutable": { + "type": "boolean" + }, "enableNonDefaultDenyPolicies": { "type": "boolean" }, diff --git a/install/kubernetes/cilium/values.yaml b/install/kubernetes/cilium/values.yaml index e65a98e03ca69..3a5b7203f75a8 100644 --- a/install/kubernetes/cilium/values.yaml +++ b/install/kubernetes/cilium/values.yaml @@ -1155,6 +1155,8 @@ healthCheckICMPFailureThreshold: 3 hostFirewall: # -- Enables the enforcement of host policies in the eBPF datapath. enabled: false +# -- Enable routing to a service that has zero endpoints +enableNoServiceEndpointsRoutable: true # -- Configure socket LB socketLB: # -- Enable socket LB @@ -1223,6 +1225,9 @@ hubble: # 2047, 4095, 8191, 16383, 32767, 65535 # eventBufferCapacity: "4095" + # -- The interval at which Hubble will send out lost events from the Observer server, if any. + # lostEventSendInterval: 1s + # -- Hubble metrics configuration. # See https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics # for more comprehensive documentation about Hubble metrics. @@ -3089,7 +3094,7 @@ nodeinit: override: ~ repository: "quay.io/cilium/startup-script" tag: "1755531540-60ee83e" - digest: "sha256:0c91245afb3a4ff78b5cc8c09226806e94a9a10eb0adb74a85e0eeed2a5cae8c" + digest: "sha256:5bdca3c2dec2c79f58d45a7a560bf1098c2126350c901379fe850b7f78d3d757" useDigest: true pullPolicy: "Always" # -- The priority class to use for the nodeinit pod. @@ -3510,14 +3515,6 @@ clustermesh: # -- The type of service used for apiserver access. type: NodePort # -- Optional port to use as the node port for apiserver access. - # - # WARNING: make sure to configure a different NodePort in each cluster if - # kube-proxy replacement is enabled, as Cilium is currently affected by a known - # bug (#24692) when NodePorts are handled by the KPR implementation. If a service - # with the same NodePort exists both in the local and the remote cluster, all - # traffic originating from inside the cluster and targeting the corresponding - # NodePort will be redirected to a local backend, regardless of whether the - # destination node belongs to the local or the remote cluster. 
nodePort: 32379 # -- Annotations for the clustermesh-apiserver service. # Example annotations to configure an internal load balancer on different cloud providers: diff --git a/install/kubernetes/cilium/values.yaml.tmpl b/install/kubernetes/cilium/values.yaml.tmpl index 4b13c4c4b81ca..07dbfda0bc45c 100644 --- a/install/kubernetes/cilium/values.yaml.tmpl +++ b/install/kubernetes/cilium/values.yaml.tmpl @@ -1165,6 +1165,8 @@ healthCheckICMPFailureThreshold: 3 hostFirewall: # -- Enables the enforcement of host policies in the eBPF datapath. enabled: false +# -- Enable routing to a service that has zero endpoints +enableNoServiceEndpointsRoutable: true # -- Configure socket LB socketLB: # -- Enable socket LB @@ -1233,6 +1235,9 @@ hubble: # 2047, 4095, 8191, 16383, 32767, 65535 # eventBufferCapacity: "4095" + # -- The interval at which Hubble will send out lost events from the Observer server, if any. + # lostEventSendInterval: 1s + # -- Hubble metrics configuration. # See https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics # for more comprehensive documentation about Hubble metrics. @@ -2157,7 +2162,7 @@ localRedirectPolicy: false localRedirectPolicies: # -- Enable local redirect policies. enabled: false - + # -- Limit the allowed addresses in Address Matcher rule of # Local Redirect Policies to the given CIDRs. # @schema@ @@ -2933,7 +2938,7 @@ operator: # @schema # type: [null, array] # @schema - tolerations: + tolerations: - key: "node-role.kubernetes.io/control-plane" operator: Exists - key: "node-role.kubernetes.io/master" #deprecated @@ -3540,14 +3545,6 @@ clustermesh: # -- The type of service used for apiserver access. type: NodePort # -- Optional port to use as the node port for apiserver access. - # - # WARNING: make sure to configure a different NodePort in each cluster if - # kube-proxy replacement is enabled, as Cilium is currently affected by a known - # bug (#24692) when NodePorts are handled by the KPR implementation. If a service - # with the same NodePort exists both in the local and the remote cluster, all - # traffic originating from inside the cluster and targeting the corresponding - # NodePort will be redirected to a local backend, regardless of whether the - # destination node belongs to the local or the remote cluster. nodePort: 32379 # -- Annotations for the clustermesh-apiserver service. 
# Example annotations to configure an internal load balancer on different cloud providers: diff --git a/operator/cmd/allocator_test.go b/operator/cmd/allocator_test.go index c79247a5470bf..518ac37d9f668 100644 --- a/operator/cmd/allocator_test.go +++ b/operator/cmd/allocator_test.go @@ -12,9 +12,12 @@ import ( "time" "github.com/cilium/hive/hivetest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" + operatorK8s "github.com/cilium/cilium/operator/k8s" "github.com/cilium/cilium/pkg/ipam/allocator/clusterpool/cidralloc" "github.com/cilium/cilium/pkg/ipam/allocator/podcidr" "github.com/cilium/cilium/pkg/ipam/cidrset" @@ -22,7 +25,6 @@ import ( cilium_api_v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" cilium_fake "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake" k8sClient "github.com/cilium/cilium/pkg/k8s/client/testutils" - "github.com/cilium/cilium/pkg/testutils" ) // TestPodCIDRAllocatorOverlap tests that, on startup all nodes with assigned podCIDRs are processed so that nodes @@ -38,7 +40,6 @@ func TestPodCIDRAllocatorOverlap(t *testing.T) { } func podCIDRAllocatorOverlapTestRun(t *testing.T) { - logger := hivetest.Logger(t) var wg sync.WaitGroup defer wg.Wait() @@ -48,14 +49,10 @@ func podCIDRAllocatorOverlapTestRun(t *testing.T) { // Create a new CIDR allocator _, cidr, err := net.ParseCIDR("10.129.0.0/16") - if err != nil { - panic(err) - } + require.NoError(t, err) set, err := cidrset.NewCIDRSet(cidr, 24) - if err != nil { - panic(err) - } + require.NoError(t, err) // Create a mock APIServer client where we have 2 existing nodes, one with a PodCIDR and one without. // When List'ed from the client, first node-a is returned then node-b @@ -87,56 +84,45 @@ func podCIDRAllocatorOverlapTestRun(t *testing.T) { CiliumFakeClientset: fakeClient, } + ciliumNodes, err := operatorK8s.CiliumNodeResource(hivetest.Lifecycle(t), fakeSet, nil) + require.NoError(t, err) + // Create a new pod manager with only our IPv4 allocator and fake client set. podCidrManager := podcidr.NewNodesPodCIDRManager(hivetest.Logger(t), []cidralloc.CIDRAllocator{ set, }, nil, &ciliumNodeUpdateImplementation{clientset: fakeSet}, nil) // start synchronization. - cns := newCiliumNodeSynchronizer(logger, fakeSet, nil, podCidrManager, false, nil) - if err := cns.Start(ctx, &wg, nil); err != nil { - t.Fatal(err) - } - - // Wait for the "node manager synced" signal, just like we would normally. 
- <-cns.ciliumNodeManagerQueueSynced - - // Trigger the Resync after the cache sync signal - podCidrManager.Resync(ctx, time.Time{}) + wg.Go(func() { + watchCiliumNodes(ctx, ciliumNodes, podCidrManager, true) + }) - err = testutils.WaitUntil(func() bool { + require.EventuallyWithT(t, func(c *assert.CollectT) { // Get node A from the mock APIServer nodeAInt, err := fakeClient.Tracker().Get(ciliumnodesResource, "", "node-a") - if err != nil { - return false + if !assert.NoError(c, err) { + return } nodeA := nodeAInt.(*cilium_api_v2.CiliumNode) // Get node B from the mock APIServer nodeBInt, err := fakeClient.Tracker().Get(ciliumnodesResource, "", "node-b") - if err != nil { - return false + if !assert.NoError(c, err) { + return } nodeB := nodeBInt.(*cilium_api_v2.CiliumNode) - if len(nodeA.Spec.IPAM.PodCIDRs) != 1 { - return false + if !assert.Len(c, nodeA.Spec.IPAM.PodCIDRs, 1) { + return } - if len(nodeB.Spec.IPAM.PodCIDRs) != 1 { - return false + if !assert.Len(c, nodeB.Spec.IPAM.PodCIDRs, 1) { + return } - // The PodCIDRs should be distinct. - if nodeA.Spec.IPAM.PodCIDRs[0] == nodeB.Spec.IPAM.PodCIDRs[0] { - t.Fatal("Node A and Node B are assigned overlapping PodCIDRs") - } - - return true - }, 2*time.Minute) - if err != nil { - t.Fatalf("nodes have no pod CIDR: %s", err) - } + assert.NotEqual(c, nodeA.Spec.IPAM.PodCIDRs, nodeB.Spec.IPAM.PodCIDRs, + "Node A and Node B should not be assigned overlapping PodCIDRs") + }, 2*time.Minute, 10*time.Millisecond) } var ciliumnodesResource = schema.GroupVersionResource{Group: "cilium.io", Version: "v2", Resource: "ciliumnodes"} diff --git a/operator/cmd/cilium_node.go b/operator/cmd/cilium_node.go index ab47b92ee3dc1..abfe0a4c94646 100644 --- a/operator/cmd/cilium_node.go +++ b/operator/cmd/cilium_node.go @@ -5,428 +5,49 @@ package cmd import ( "context" - "fmt" - "log/slog" - "reflect" "strings" - "sync" "time" - "k8s.io/apimachinery/pkg/api/errors" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/workqueue" - operatorK8s "github.com/cilium/cilium/operator/k8s" - operatorOption "github.com/cilium/cilium/operator/option" "github.com/cilium/cilium/pkg/annotation" "github.com/cilium/cilium/pkg/ipam/allocator" cilium_v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" k8sClient "github.com/cilium/cilium/pkg/k8s/client" - "github.com/cilium/cilium/pkg/k8s/informer" "github.com/cilium/cilium/pkg/k8s/resource" - corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" - "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels" - "github.com/cilium/cilium/pkg/k8s/utils" - "github.com/cilium/cilium/pkg/kvstore" - "github.com/cilium/cilium/pkg/kvstore/store" - "github.com/cilium/cilium/pkg/logging" - "github.com/cilium/cilium/pkg/logging/logfields" - nodeStore "github.com/cilium/cilium/pkg/node/store" - nodeTypes "github.com/cilium/cilium/pkg/node/types" - "github.com/cilium/cilium/pkg/option" ) -// ciliumNodeName is only used to implement NamedKey interface. -type ciliumNodeName struct { - cluster string - name string -} - -func (c *ciliumNodeName) GetKeyName() string { - return nodeTypes.GetKeyNodeName(c.cluster, c.name) -} - -// ciliumNodeManagerQueueSyncedKey indicates that the caches -// are synced. The underscore prefix ensures that it can never -// clash with a real key, as Kubernetes does not allow object -// names to start with an underscore. 
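The rewritten allocator test above drops the manual testutils.WaitUntil polling in favour of require.EventuallyWithT: soft assertions go through *assert.CollectT, and a failed check simply returns from the closure so the next tick retries until the timeout expires. A minimal sketch of that pattern; fetch is a stand-in for whatever the test reads from the fake clientset.

package example

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// waitForCondition polls fetch until both checks pass or the timeout is hit.
func waitForCondition(t *testing.T, fetch func() (int, error)) {
	require.EventuallyWithT(t, func(c *assert.CollectT) {
		n, err := fetch()
		if !assert.NoError(c, err) {
			return // transient error: give up this tick and retry
		}
		assert.Equal(c, 2, n) // failing here also just means "retry on the next tick"
	}, 2*time.Minute, 10*time.Millisecond)
}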
-const ciliumNodeManagerQueueSyncedKey = "_ciliumNodeManagerQueueSynced" - -type ciliumNodeSynchronizer struct { - logger *slog.Logger - clientset k8sClient.Clientset - kvstoreClient kvstore.Client - nodeManager allocator.NodeEventHandler - withKVStore bool - - // ciliumNodeStore contains all CiliumNodes present in k8s. - ciliumNodeStore cache.Store - - k8sCiliumNodesCacheSynced chan struct{} - ciliumNodeManagerQueueSynced chan struct{} - workqueueMetricsProvider workqueue.MetricsProvider -} - -func newCiliumNodeSynchronizer(logger *slog.Logger, clientset k8sClient.Clientset, kvstoreClient kvstore.Client, nodeManager allocator.NodeEventHandler, withKVStore bool, workqueueMetricsProvider workqueue.MetricsProvider) *ciliumNodeSynchronizer { - return &ciliumNodeSynchronizer{ - logger: logger, - clientset: clientset, - kvstoreClient: kvstoreClient, - nodeManager: nodeManager, - withKVStore: withKVStore, - - k8sCiliumNodesCacheSynced: make(chan struct{}), - ciliumNodeManagerQueueSynced: make(chan struct{}), - workqueueMetricsProvider: workqueueMetricsProvider, - } -} - -func (s *ciliumNodeSynchronizer) Start(ctx context.Context, wg *sync.WaitGroup, podsStore resource.Store[*corev1.Pod]) error { - var ( - ciliumNodeKVStore *store.SharedStore - err error - nodeManagerSyncHandler func(key string) error - kvStoreSyncHandler func(key string) error - connectedToKVStore = make(chan struct{}) - resourceEventHandler = cache.ResourceEventHandlerFuncs{} - ) - - var ciliumNodeManagerQueueConfig = workqueue.TypedRateLimitingQueueConfig[string]{ - Name: "node_manager", - MetricsProvider: s.workqueueMetricsProvider, - } - var kvStoreQueueConfig = workqueue.TypedRateLimitingQueueConfig[string]{ - Name: "kvstore", - MetricsProvider: s.workqueueMetricsProvider, - } - - if operatorOption.Config.EnableMetrics { - ciliumNodeManagerQueueConfig.MetricsProvider = s.workqueueMetricsProvider - kvStoreQueueConfig.MetricsProvider = s.workqueueMetricsProvider - } - - var ciliumNodeManagerQueue = workqueue.NewTypedRateLimitingQueueWithConfig[string](workqueue.DefaultTypedControllerRateLimiter[string](), ciliumNodeManagerQueueConfig) - var kvStoreQueue = workqueue.NewTypedRateLimitingQueueWithConfig[string]( - workqueue.NewTypedItemExponentialFailureRateLimiter[string](1*time.Second, 120*time.Second), - kvStoreQueueConfig, - ) - - // KVStore is enabled -> we will run the event handler to sync objects into - // KVStore. - if s.withKVStore { - // Connect to the KVStore asynchronously so that we are able to start - // the operator without relying on the KVStore to be up. - // Start a goroutine to GC all CiliumNodes from the KVStore that are - // no longer running. - wg.Add(1) - go func() { - defer wg.Done() - - s.logger.InfoContext(ctx, "Starting to synchronize CiliumNode custom resources to KVStore") - - ciliumNodeKVStore, err = store.JoinSharedStore(s.logger, - store.Configuration{ - Backend: s.kvstoreClient, - Prefix: nodeStore.NodeStorePrefix, - KeyCreator: nodeStore.KeyCreator, - - // We never upsert anything in this store, - // so let's disable synchronization. - SynchronizationInterval: 0, - }) - - if err != nil { - logging.Fatal(s.logger, "Unable to setup node watcher", logfields.Error, err) - } - close(connectedToKVStore) - - <-s.k8sCiliumNodesCacheSynced - // Since we processed all events received from k8s we know that - // at this point the list in ciliumNodeStore should be the source of - // truth and we need to delete all nodes in the kvNodeStore that are - // *not* present in the ciliumNodeStore. 
- listOfCiliumNodes := s.ciliumNodeStore.ListKeys() - - kvStoreNodes := ciliumNodeKVStore.SharedKeysMap() - - for _, ciliumNode := range listOfCiliumNodes { - // The remaining kvStoreNodes are leftovers that need to be GCed - kvStoreNodeName := nodeTypes.GetKeyNodeName(option.Config.ClusterName, ciliumNode) - delete(kvStoreNodes, kvStoreNodeName) - } - - if len(listOfCiliumNodes) == 0 && len(kvStoreNodes) != 0 { - s.logger.WarnContext(ctx, "Preventing GC of nodes in the KVStore due the nonexistence of any CiliumNodes in kube-apiserver") - return - } - - for _, kvStoreNode := range kvStoreNodes { - // Only delete the nodes that belong to our cluster - if strings.HasPrefix(kvStoreNode.GetKeyName(), option.Config.ClusterName) { - ciliumNodeKVStore.DeleteLocalKey(ctx, kvStoreNode) - } - } - }() - } else { - s.logger.InfoContext(ctx, "Starting to synchronize CiliumNode custom resources") - } - - if s.nodeManager != nil { - nodeManagerSyncHandler = s.syncHandlerConstructor( - func(node *cilium_v2.CiliumNode) error { - s.nodeManager.Delete(node) - return nil - }, - func(node *cilium_v2.CiliumNode) error { - value, ok := node.Annotations[annotation.IPAMIgnore] - if ok && strings.ToLower(value) == "true" { - return nil - } - - // node is deep copied before it is stored in pkg/aws/eni - s.nodeManager.Upsert(node) - return nil - }) - } - - if s.withKVStore { - ciliumPodsSelector, err := labels.Parse(operatorOption.Config.CiliumPodLabels) - if err != nil { - return fmt.Errorf("unable to parse cilium pod selector: %w", err) - } - - kvStoreSyncHandler = s.syncHandlerConstructor( - func(node *cilium_v2.CiliumNode) error { - // Check if a Cilium agent is still running on the given node, and - // in that case retry later, because it would recognize the deletion - // event and recreate the kvstore entry right away. Hence, defeating - // the whole purpose of this GC logic, and leading to the node entry - // being eventually deleted by the lease expiration only. - pods, err := podsStore.ByIndex(operatorK8s.PodNodeNameIndex, node.GetName()) - if err != nil { - return fmt.Errorf("retrieving pods indexed by node %q: %w", node.GetName(), err) - } - - for _, pod := range pods { - if utils.IsPodRunning(pod.Status) && ciliumPodsSelector.Matches(labels.Set(pod.Labels)) { - return fmt.Errorf("skipping deletion from kvstore, as Cilium agent is still running on %q", node.GetName()) - } - } - - nodeDel := ciliumNodeName{ - cluster: option.Config.ClusterName, - name: node.Name, - } - ciliumNodeKVStore.DeleteLocalKey(ctx, &nodeDel) - return nil - }, - func(node *cilium_v2.CiliumNode) error { - return nil - }) - } - - // If both nodeManager and KVStore are nil, then we don't need to handle - // any watcher events, but we will need to keep all CiliumNodes in - // memory because 'ciliumNodeStore' is used across the operator - // to get the latest state of a CiliumNode. 
- if s.withKVStore || s.nodeManager != nil { - resourceEventHandler = cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj any) { - key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) - if err != nil { - s.logger.WarnContext(ctx, "Unable to process CiliumNode Add event", logfields.Error, err) - return - } - if s.nodeManager != nil { - ciliumNodeManagerQueue.Add(key) - } - if s.withKVStore { - kvStoreQueue.Add(key) - } - }, - UpdateFunc: func(oldObj, newObj any) { - if oldNode := informer.CastInformerEvent[cilium_v2.CiliumNode](s.logger, oldObj); oldNode != nil { - if newNode := informer.CastInformerEvent[cilium_v2.CiliumNode](s.logger, newObj); newNode != nil { - if oldNode.DeepEqual(newNode) { - return - } - key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(newObj) - if err != nil { - s.logger.WarnContext(ctx, "Unable to process CiliumNode Update event", logfields.Error, err) - return - } - if s.nodeManager != nil { - ciliumNodeManagerQueue.Add(key) - } - if s.withKVStore { - kvStoreQueue.Add(key) - } - } else { - s.logger.WarnContext(ctx, - "Unknown CiliumNode object type received", - logfields.Type, reflect.TypeOf(newNode), - logfields.Node, newNode, - ) - } - } else { - s.logger.WarnContext(ctx, - "Unknown CiliumNode object type received", - logfields.Type, reflect.TypeOf(oldNode), - logfields.Node, oldNode, - ) - } - }, - DeleteFunc: func(obj any) { - key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) - if err != nil { - s.logger.WarnContext(ctx, "Unable to process CiliumNode Delete event", logfields.Error, err) - return - } - if s.nodeManager != nil { - ciliumNodeManagerQueue.Add(key) - } - if s.withKVStore { - kvStoreQueue.Add(key) - } - }, - } - } - - // TODO: The operator is currently storing a full copy of the - // CiliumNode resource, as the resource grows, we may want to consider - // introducing a slim version of it. - var ciliumNodeInformer cache.Controller - s.ciliumNodeStore, ciliumNodeInformer = informer.NewInformer( - utils.ListerWatcherFromTyped[*cilium_v2.CiliumNodeList](s.clientset.CiliumV2().CiliumNodes()), - &cilium_v2.CiliumNode{}, - 0, - resourceEventHandler, - nil, - ) - - wg.Add(1) - go func() { - defer wg.Done() - - cache.WaitForCacheSync(ctx.Done(), ciliumNodeInformer.HasSynced) - close(s.k8sCiliumNodesCacheSynced) - ciliumNodeManagerQueue.Add(ciliumNodeManagerQueueSyncedKey) - s.logger.InfoContext(ctx, "CiliumNodes caches synced with Kubernetes") - // Only handle events if nodeManagerSyncHandler is not nil. If it is nil - // then there isn't any event handler set for CiliumNodes events. - if nodeManagerSyncHandler != nil { - go func() { - // infinite loop. run in a goroutine to unblock code execution - for s.processNextWorkItem(ciliumNodeManagerQueue, nodeManagerSyncHandler) { - } - }() - } - // Start handling events for KVStore **after** nodeManagerSyncHandler - // otherwise Cilium Operator will block until the KVStore is available. - // Only handle events if kvStoreSyncHandler is not nil. If it is nil - // then there isn't any event handler set for CiliumNodes events. 
- if s.withKVStore && kvStoreSyncHandler != nil { - <-connectedToKVStore - s.logger.InfoContext(ctx, "Connected to the KVStore, syncing CiliumNodes to the KVStore") - // infinite loop it will block code execution - for s.processNextWorkItem(kvStoreQueue, kvStoreSyncHandler) { +func watchCiliumNodes(ctx context.Context, ciliumNodes resource.Resource[*cilium_v2.CiliumNode], handler allocator.NodeEventHandler, withResync bool) { + // We will use CiliumNodes as the source of truth for the podCIDRs. + // Once the CiliumNodes are synchronized with the operator we will + // be able to watch for K8s Node events which they will be used + // to create the remaining CiliumNodes. + for ev := range ciliumNodes.Events(ctx) { + switch ev.Kind { + case resource.Upsert: + value, ok := ev.Object.Annotations[annotation.IPAMIgnore] + if !ok || strings.ToLower(value) != "true" { + handler.Upsert(ev.Object) } - } - }() - - wg.Add(1) - go func() { - defer wg.Done() - defer kvStoreQueue.ShutDown() - defer ciliumNodeManagerQueue.ShutDown() - - ciliumNodeInformer.Run(ctx.Done()) - }() - - return nil -} - -func (s *ciliumNodeSynchronizer) syncHandlerConstructor(notFoundHandler, foundHandler func(node *cilium_v2.CiliumNode) error) func(key string) error { - return func(key string) error { - _, name, err := cache.SplitMetaNamespaceKey(key) - if err != nil { - s.logger.Error("Unable to process CiliumNode event", logfields.Error, err) - return err - } - obj, exists, err := s.ciliumNodeStore.GetByKey(name) - // Delete handling - if !exists || errors.IsNotFound(err) { - return notFoundHandler(&cilium_v2.CiliumNode{ - ObjectMeta: meta_v1.ObjectMeta{ - Name: name, - }, - }) - } - if err != nil { - s.logger.Warn("Unable to retrieve CiliumNode from watcher store", logfields.Error, err) - return err - } - cn, ok := obj.(*cilium_v2.CiliumNode) - if !ok { - tombstone, ok := obj.(cache.DeletedFinalStateUnknown) - if !ok { - return fmt.Errorf("couldn't get object from tombstone %T", obj) + case resource.Delete: + handler.Delete(ev.Object) + + case resource.Sync: + // We don't want CiliumNodes that don't have podCIDRs to be + // allocated with a podCIDR already being used by another node. + // For this reason we will call Resync after all CiliumNodes are + // synced with the operator to signal the node manager, since it + // knows all podCIDRs that are currently set in the cluster, that + // it can allocate podCIDRs for the nodes that don't have a podCIDR + // set. + if withResync { + handler.Resync(ctx, time.Time{}) } - cn, ok = tombstone.Obj.(*cilium_v2.CiliumNode) - if !ok { - return fmt.Errorf("tombstone contained object that is not a *cilium_v2.CiliumNode %T", obj) - } - } - if cn.DeletionTimestamp != nil { - return notFoundHandler(cn) - } - return foundHandler(cn) - } -} - -// processNextWorkItem process all events from the workqueue. -func (s *ciliumNodeSynchronizer) processNextWorkItem(queue workqueue.TypedRateLimitingInterface[string], syncHandler func(key string) error) bool { - key, quit := queue.Get() - if quit { - return false - } - defer queue.Done(key) - - if key == ciliumNodeManagerQueueSyncedKey { - close(s.ciliumNodeManagerQueueSynced) - return true - } - - err := syncHandler(key) - if err == nil { - // If err is nil we can forget it from the queue, if it is not nil - // the queue handler will retry to process this key until it succeeds. 
- if queue.NumRequeues(key) > 0 { - s.logger.Info("CiliumNode successfully reconciled after retries", logfields.NodeName, key) } - queue.Forget(key) - return true - } - const silentRetries = 5 - if queue.NumRequeues(key) < silentRetries { - s.logger.Info("Failed reconciling CiliumNode, will retry", - logfields.Error, err, - logfields.NodeName, key, - ) - } else { - s.logger.Warn( - "Failed reconciling CiliumNode, will retry", - logfields.Error, err, - logfields.NodeName, key, - ) + ev.Done(nil) } - - queue.AddRateLimited(key) - - return true } type ciliumNodeUpdateImplementation struct { diff --git a/operator/cmd/flags.go b/operator/cmd/flags.go index 5e6a87aa9d9b9..c45091f6c14b5 100644 --- a/operator/cmd/flags.go +++ b/operator/cmd/flags.go @@ -214,9 +214,6 @@ func InitGlobalFlags(logger *slog.Logger, cmd *cobra.Command, vp *viper.Viper) { flags.Bool(operatorOption.SyncK8sServices, true, "Synchronize Kubernetes services to kvstore") option.BindEnv(vp, operatorOption.SyncK8sServices) - flags.Bool(operatorOption.SyncK8sNodes, true, "Synchronize Kubernetes nodes to kvstore and perform CNP GC") - option.BindEnv(vp, operatorOption.SyncK8sNodes) - flags.Int(operatorOption.UnmanagedPodWatcherInterval, 15, "Interval to check for unmanaged kube-dns pods (0 to disable)") option.BindEnv(vp, operatorOption.UnmanagedPodWatcherInterval) diff --git a/operator/cmd/root.go b/operator/cmd/root.go index a0166f7dfa502..226b369158dda 100644 --- a/operator/cmd/root.go +++ b/operator/cmd/root.go @@ -12,7 +12,6 @@ import ( "path/filepath" "sync" "sync/atomic" - "time" "github.com/cilium/hive/cell" "github.com/spf13/cobra" @@ -43,6 +42,7 @@ import ( gatewayapi "github.com/cilium/cilium/operator/pkg/gateway-api" "github.com/cilium/cilium/operator/pkg/ingress" "github.com/cilium/cilium/operator/pkg/kvstore/locksweeper" + "github.com/cilium/cilium/operator/pkg/kvstore/nodesgc" "github.com/cilium/cilium/operator/pkg/lbipam" "github.com/cilium/cilium/operator/pkg/networkpolicy" "github.com/cilium/cilium/operator/pkg/nodeipam" @@ -59,7 +59,6 @@ import ( "github.com/cilium/cilium/pkg/dial" "github.com/cilium/cilium/pkg/gops" "github.com/cilium/cilium/pkg/hive" - "github.com/cilium/cilium/pkg/ipam/allocator" ipamOption "github.com/cilium/cilium/pkg/ipam/option" "github.com/cilium/cilium/pkg/k8s" "github.com/cilium/cilium/pkg/k8s/apis" @@ -310,6 +309,9 @@ var ( // configuration to describe, in form of prometheus metrics, which // features are enabled on the operator. 
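The hunks above collapse the old ciliumNodeSynchronizer (informer, two workqueues, KVStore plumbing) into watchCiliumNodes, which simply ranges over CiliumNode resource events. A condensed sketch of the consumer contract this relies on, assuming the resource package emits Upsert/Delete per object plus a single Sync event once the initial listing has been replayed, and that every event must be acknowledged with Done:

package sketch

import (
	"context"
	"time"

	"github.com/cilium/cilium/pkg/ipam/allocator"
	cilium_v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
	"github.com/cilium/cilium/pkg/k8s/resource"
)

// consume mirrors the shape of watchCiliumNodes: per-object events feed the
// node manager, and the Sync event marks the point where bulk work can run.
func consume(ctx context.Context, nodes resource.Resource[*cilium_v2.CiliumNode], h allocator.NodeEventHandler) {
	for ev := range nodes.Events(ctx) {
		switch ev.Kind {
		case resource.Upsert:
			h.Upsert(ev.Object)
		case resource.Delete:
			h.Delete(ev.Object)
		case resource.Sync:
			// Initial listing replayed: the manager now knows every podCIDR
			// in use, so a Resync can safely allocate the missing ones.
			h.Resync(ctx, time.Time{})
		}
		// Every event must be acknowledged; error handling is omitted in this sketch.
		ev.Done(nil)
	}
}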
features.Cell, + + // GC of stale node entries in the KVStore + nodesgc.Cell, ), ) @@ -533,7 +535,6 @@ type params struct { cell.In Lifecycle cell.Lifecycle Clientset k8sClient.Clientset - KVStoreClient kvstore.Client Resources operatorK8s.Resources SvcResolver *dial.ServiceResolver CfgClusterMeshPolicy cmtypes.PolicyConfig @@ -549,7 +550,6 @@ func registerLegacyOnLeader(p params) { ctx: ctx, cancel: cancel, clientset: p.Clientset, - kvstoreClient: p.KVStoreClient, resources: p.Resources, cfgClusterMeshPolicy: p.CfgClusterMeshPolicy, workqueueMetricsProvider: p.WorkQueueMetricsProvider, @@ -567,7 +567,6 @@ type legacyOnLeader struct { ctx context.Context cancel context.CancelFunc clientset k8sClient.Clientset - kvstoreClient kvstore.Client wg sync.WaitGroup resources operatorK8s.Resources cfgClusterMeshPolicy cmtypes.PolicyConfig @@ -609,11 +608,6 @@ func (legacy *legacyOnLeader) onStart(ctx cell.HookContext) error { }() } - var ( - nodeManager allocator.NodeEventHandler - withKVStore bool - ) - legacy.logger.InfoContext(ctx, "Initializing IPAM", logfields.Mode, option.Config.IPAM, @@ -647,11 +641,17 @@ func (legacy *legacyOnLeader) onStart(ctx cell.HookContext) error { logging.Fatal(legacy.logger, fmt.Sprintf("Unable to start %s allocator", ipamMode), logfields.Error, err) } - nodeManager = nm - } + legacy.wg.Go(func() { + // The NodeEventHandler uses operatorWatchers.PodStore for IPAM surge allocation. + podStore, err := legacy.resources.Pods.Store(legacy.ctx) + if err != nil { + logging.Fatal(legacy.logger, "Unable to retrieve Pod store from Pod resource watcher", logfields.Error, err) + } + operatorWatchers.PodStore = podStore.CacheStore() - if legacy.kvstoreClient.IsEnabled() && legacy.clientset.IsEnabled() && operatorOption.Config.SyncK8sNodes { - withKVStore = true + withResync := option.Config.IPAM == ipamOption.IPAMClusterPool || option.Config.IPAM == ipamOption.IPAMMultiPool + watchCiliumNodes(legacy.ctx, legacy.resources.CiliumNodes, nm, withResync) + }) } if legacy.clientset.IsEnabled() && @@ -669,45 +669,13 @@ func (legacy *legacyOnLeader) onStart(ctx cell.HookContext) error { watcherLogger) } - ciliumNodeSynchronizer := newCiliumNodeSynchronizer(legacy.logger, legacy.clientset, legacy.kvstoreClient, nodeManager, withKVStore, legacy.workqueueMetricsProvider) - if legacy.clientset.IsEnabled() { - // ciliumNodeSynchronizer uses operatorWatchers.PodStore for IPAM surge - // allocation. Initializing PodStore from Pod resource is temporary until - // ciliumNodeSynchronizer is migrated to a cell. - podStore, err := legacy.resources.Pods.Store(legacy.ctx) - if err != nil { - logging.Fatal(legacy.logger, "Unable to retrieve Pod store from Pod resource watcher", logfields.Error, err) - } - operatorWatchers.PodStore = podStore.CacheStore() - - if err := ciliumNodeSynchronizer.Start(legacy.ctx, &legacy.wg, podStore); err != nil { - logging.Fatal(legacy.logger, "Unable to setup cilium node synchronizer", logfields.Error, err) - } - if operatorOption.Config.NodesGCInterval != 0 { - operatorWatchers.RunCiliumNodeGC(legacy.ctx, &legacy.wg, legacy.clientset, ciliumNodeSynchronizer.ciliumNodeStore, + operatorWatchers.RunCiliumNodeGC(legacy.ctx, &legacy.wg, legacy.clientset, legacy.resources.CiliumNodes, operatorOption.Config.NodesGCInterval, watcherLogger, legacy.workqueueMetricsProvider) } } - if option.Config.IPAM == ipamOption.IPAMClusterPool || option.Config.IPAM == ipamOption.IPAMMultiPool { - // We will use CiliumNodes as the source of truth for the podCIDRs. 
- // Once the CiliumNodes are synchronized with the operator we will - // be able to watch for K8s Node events which they will be used - // to create the remaining CiliumNodes. - <-ciliumNodeSynchronizer.ciliumNodeManagerQueueSynced - - // We don't want CiliumNodes that don't have podCIDRs to be - // allocated with a podCIDR already being used by another node. - // For this reason we will call Resync after all CiliumNodes are - // synced with the operator to signal the node manager, since it - // knows all podCIDRs that are currently set in the cluster, that - // it can allocate podCIDRs for the nodes that don't have a podCIDR - // set. - nodeManager.Resync(legacy.ctx, time.Time{}) - } - if option.Config.IdentityAllocationMode == option.IdentityAllocationModeCRD || option.Config.IdentityAllocationMode == option.IdentityAllocationModeDoubleWriteReadKVstore || option.Config.IdentityAllocationMode == option.IdentityAllocationModeDoubleWriteReadCRD { diff --git a/operator/metrics/metrics.go b/operator/metrics/metrics.go index 4f9d5b4259f7a..94f6e5362a28f 100644 --- a/operator/metrics/metrics.go +++ b/operator/metrics/metrics.go @@ -10,6 +10,7 @@ import ( "github.com/cilium/hive/cell" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/collectors" + k8sCtrlMetrics "sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics" "github.com/cilium/cilium/pkg/hive" "github.com/cilium/cilium/pkg/metrics" @@ -71,6 +72,9 @@ func initializeMetrics(p params) { metrics.WorkQueueRetries, ) + p.Registry.Register(k8sCtrlMetrics.ReadCertificateTotal) + p.Registry.Register(k8sCtrlMetrics.ReadCertificateErrors) + metrics.InitOperatorMetrics() p.Registry.MustRegister(metrics.ErrorsWarnings) metrics.FlushLoggingMetrics() diff --git a/operator/option/config.go b/operator/option/config.go index 955569a2ac8d6..eb59868b06729 100644 --- a/operator/option/config.go +++ b/operator/option/config.go @@ -49,9 +49,6 @@ const ( // SyncK8sServices synchronizes k8s services into the kvstore SyncK8sServices = "synchronize-k8s-services" - // SyncK8sNodes synchronizes k8s nodes into the kvstore - SyncK8sNodes = "synchronize-k8s-nodes" - // UnmanagedPodWatcherInterval is the interval to check for unmanaged kube-dns pods (0 to disable) UnmanagedPodWatcherInterval = "unmanaged-pod-watcher-interval" @@ -231,9 +228,6 @@ type OperatorConfig struct { // SyncK8sServices synchronizes k8s services into the kvstore SyncK8sServices bool - // SyncK8sNodes synchronizes k8s nodes into the kvstore - SyncK8sNodes bool - // UnmanagedPodWatcherInterval is the interval to check for unmanaged kube-dns pods (0 to disable) UnmanagedPodWatcherInterval int @@ -401,7 +395,6 @@ func (c *OperatorConfig) Populate(logger *slog.Logger, vp *viper.Viper) { c.EnableMetrics = vp.GetBool(EnableMetrics) c.EndpointGCInterval = vp.GetDuration(EndpointGCInterval) c.SyncK8sServices = vp.GetBool(SyncK8sServices) - c.SyncK8sNodes = vp.GetBool(SyncK8sNodes) c.UnmanagedPodWatcherInterval = vp.GetInt(UnmanagedPodWatcherInterval) c.NodeCIDRMaskSizeIPv4 = vp.GetInt(NodeCIDRMaskSizeIPv4) c.NodeCIDRMaskSizeIPv6 = vp.GetInt(NodeCIDRMaskSizeIPv6) diff --git a/operator/pkg/ciliumidentity/controller_test.go b/operator/pkg/ciliumidentity/controller_test.go index 3ee39444a63b4..7c8755b370ea9 100644 --- a/operator/pkg/ciliumidentity/controller_test.go +++ b/operator/pkg/ciliumidentity/controller_test.go @@ -419,17 +419,13 @@ func TestUpdateUsedCIDIsReverted(t *testing.T) { require.EventuallyWithT(t, func(ct *assert.CollectT) { cids := 
store.List() - assert.Len(ct, cids, 1) - if len(cids) != 1 { + if !assert.Len(ct, cids, 1) { return } cid = cids[0] - if !cmp.Equal(cid.SecurityLabels, cid1.SecurityLabels) { - t.Fatalf("expected labels %v, got %v", cid.SecurityLabels, cid1.SecurityLabels) - } + assert.Equal(ct, cid1.SecurityLabels, cid.SecurityLabels) }, WaitUntilTimeout, 100*time.Millisecond) - } func TestDeleteUsedCIDIsRecreated(t *testing.T) { diff --git a/operator/pkg/gateway-api/gateway.go b/operator/pkg/gateway-api/gateway.go index 9c091fed301a0..cfaa88bfd9ba5 100644 --- a/operator/pkg/gateway-api/gateway.go +++ b/operator/pkg/gateway-api/gateway.go @@ -11,6 +11,7 @@ import ( "slices" corev1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" @@ -151,7 +152,7 @@ func (r *gatewayReconciler) SetupWithManager(mgr ctrl.Manager) error { // Watch created and owned resources Owns(&ciliumv2.CiliumEnvoyConfig{}). Owns(&corev1.Service{}). - Owns(&corev1.Endpoints{}) + Owns(&discoveryv1.EndpointSlice{}) if tlsRouteEnabled { // Watch TLSRoute linked to Gateway diff --git a/operator/pkg/kvstore/nodesgc/cell.go b/operator/pkg/kvstore/nodesgc/cell.go new file mode 100644 index 0000000000000..16ef886b1295d --- /dev/null +++ b/operator/pkg/kvstore/nodesgc/cell.go @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package nodesgc + +import ( + "github.com/cilium/hive/cell" + "github.com/spf13/pflag" +) + +// Cell handles the garbage collection of stale node entries from the kvstore. +var Cell = cell.Module( + "kvstore-nodes-gc", + "GC of stale KVStore node entries", + + cell.Config(Config{Enable: true}), + cell.ProvidePrivate(newGC), + cell.Invoke(func(*gc) {}), +) + +type Config struct { + Enable bool `mapstructure:"synchronize-k8s-nodes"` +} + +func (def Config) Flags(flags *pflag.FlagSet) { + flags.Bool("synchronize-k8s-nodes", def.Enable, "Perform GC of stale node entries from the KVStore") +} diff --git a/operator/pkg/kvstore/nodesgc/gc.go b/operator/pkg/kvstore/nodesgc/gc.go new file mode 100644 index 0000000000000..a39f326936f36 --- /dev/null +++ b/operator/pkg/kvstore/nodesgc/gc.go @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package nodesgc + +import ( + "context" + "fmt" + "log/slog" + "path" + "time" + + "github.com/cilium/hive/cell" + "github.com/cilium/hive/job" + "k8s.io/client-go/util/workqueue" + + operatorK8s "github.com/cilium/cilium/operator/k8s" + operatorOption "github.com/cilium/cilium/operator/option" + cmtypes "github.com/cilium/cilium/pkg/clustermesh/types" + cilium_api_v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + k8sClient "github.com/cilium/cilium/pkg/k8s/client" + "github.com/cilium/cilium/pkg/k8s/resource" + slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" + "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels" + "github.com/cilium/cilium/pkg/k8s/utils" + "github.com/cilium/cilium/pkg/kvstore" + "github.com/cilium/cilium/pkg/kvstore/store" + "github.com/cilium/cilium/pkg/logging/logfields" + nodeStore "github.com/cilium/cilium/pkg/node/store" + nodeTypes "github.com/cilium/cilium/pkg/node/types" +) + +var ( + // wqRateLimiter is the rate limiter used for the working queue. It can be overridden for testing purposes. 
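The nodesgc cell above keeps the historical --synchronize-k8s-nodes flag name (removed from operator flags.go earlier in this diff) but now owns it through the Config's mapstructure tag, and newGC returning nil when the feature is disabled makes the Invoke effectively a no-op. A stripped-down sketch of that hive cell pattern; the featuregc and runner names are illustrative only and not part of Cilium.

package featuregc

import (
	"github.com/cilium/hive/cell"
	"github.com/spf13/pflag"
)

// Config is bound to a CLI flag via the Flags method; the mapstructure tag
// must match the flag name so the value is populated correctly.
type Config struct {
	Enable bool `mapstructure:"enable-feature-gc"`
}

func (def Config) Flags(flags *pflag.FlagSet) {
	flags.Bool("enable-feature-gc", def.Enable, "Enable the illustrative GC job")
}

type runner struct{}

// newRunner returns nil when disabled, so the module stays inert.
func newRunner(cfg Config) *runner {
	if !cfg.Enable {
		return nil
	}
	return &runner{}
}

var Cell = cell.Module(
	"feature-gc",
	"Illustrative GC module",

	cell.Config(Config{Enable: true}),
	cell.ProvidePrivate(newRunner),
	cell.Invoke(func(*runner) {}),
)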
+ wqRateLimiter = workqueue.NewTypedItemExponentialFailureRateLimiter[nodeName](1*time.Second, 120*time.Second) + + // kvstoreUpsertQueueDelay is the delay before checking whether a newly observed kvstore entry should be + // deleted or not. It is meant to give time to the corresponding CiliumNode to be created as well before + // attempting deletion, as an additional safety measure in addition to checking whether a Cilium agent is + // running on that node. It can be overridden for testing purposes. + kvstoreUpsertQueueDelay = 1 * time.Minute +) + +type nodeName string + +type gc struct { + logger *slog.Logger + jg job.Group + cinfo cmtypes.ClusterInfo + + client kvstore.Client + ciliumNodes resource.Resource[*cilium_api_v2.CiliumNode] + pods resource.Resource[*slim_corev1.Pod] + podsSelector labels.Selector + + queue workqueue.TypedRateLimitingInterface[nodeName] +} + +func newGC(in struct { + cell.In + + Logger *slog.Logger + JobGroup job.Group + + Config Config + ClusterInfo cmtypes.ClusterInfo + + WQMetricsProvider workqueue.MetricsProvider + + Clientset k8sClient.Clientset + KVStoreClient kvstore.Client + StoreFactory store.Factory + + CiliumNodes resource.Resource[*cilium_api_v2.CiliumNode] + Pods resource.Resource[*slim_corev1.Pod] +}) (*gc, error) { + if !in.Clientset.IsEnabled() || !in.KVStoreClient.IsEnabled() || !in.Config.Enable { + return nil, nil + } + + selector, err := labels.Parse(operatorOption.Config.CiliumPodLabels) + if err != nil { + return nil, fmt.Errorf("unable to parse cilium pod selector: %w", err) + } + + g := gc{ + logger: in.Logger, + jg: in.JobGroup, + cinfo: in.ClusterInfo, + + client: in.KVStoreClient, + ciliumNodes: in.CiliumNodes, + pods: in.Pods, + podsSelector: selector, + + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + wqRateLimiter, + workqueue.TypedRateLimitingQueueConfig[nodeName]{ + Name: "kvstore-nodes", + MetricsProvider: in.WQMetricsProvider, + }, + ), + } + + in.JobGroup.Add( + job.OneShot("gc", g.run), + job.OneShot("watch-k8s", func(ctx context.Context, health cell.Health) error { + health.OK("Primed") + + for ev := range g.ciliumNodes.Events(ctx) { + switch ev.Kind { + case resource.Delete: + // Enqueue deleted CiliumNodes, to GC the corresponding kvstore + // entry if still present. + g.queue.Add(nodeName(ev.Key.Name)) + case resource.Sync: + health.OK("Synced") + } + + ev.Done(nil) + } + + // We are terminating, shutdown the queue as well + health.OK("Shutting down") + g.queue.ShutDownWithDrain() + return nil + }), + job.OneShot("watch-kvstore", func(ctx context.Context, health cell.Health) error { + health.OK("Primed") + in.StoreFactory.NewWatchStore(g.cinfo.Name, nodeStore.KeyCreator, &observer{g.queue}, + store.RWSWithOnSyncCallback(func(context.Context) { health.OK("Synced") }), + ).Watch(ctx, g.client, path.Join(nodeStore.NodeStorePrefix, g.cinfo.Name)) + return nil + }), + ) + + return &g, nil +} + +func (g *gc) run(ctx context.Context, health cell.Health) error { + health.OK("Initializing") + + ciliumNodes, err := g.ciliumNodes.Store(ctx) + if err != nil { + return fmt.Errorf("retrieving CiliumNodes store: %w", err) + } + + pods, err := g.pods.Store(ctx) + if err != nil { + return fmt.Errorf("retrieving Pods store: %w", err) + } + + health.OK("Initialized") + for g.processNextWorkItem(func(nodeName nodeName) error { + // Check if the CiliumNode still exists, or got recreated, as we don't + // need to do anything in that case. 
+ if _, exists, err := ciliumNodes.GetByKey(resource.Key{Name: string(nodeName)}); err != nil { + return fmt.Errorf("retrieving CiliumNode %q: %w", nodeName, err) + } else if exists { + return nil + } + + // Check if a Cilium agent is still running on the given node, and + // in that case retry later, because it would recognize the deletion + // event and recreate the kvstore entry right away. Hence, defeating + // the whole purpose of this GC logic, and leading to the node entry + // being eventually deleted by the lease expiration only. + found, err := pods.ByIndex(operatorK8s.PodNodeNameIndex, string(nodeName)) + if err != nil { + return fmt.Errorf("retrieving pods indexed by node %q: %w", nodeName, err) + } + + for _, pod := range found { + if utils.IsPodRunning(pod.Status) && g.podsSelector.Matches(labels.Set(pod.Labels)) { + return fmt.Errorf("delaying deletion from kvstore, as Cilium agent is still running on %q", nodeName) + } + } + + key := path.Join(nodeStore.NodeStorePrefix, nodeTypes.GetKeyNodeName(g.cinfo.Name, string(nodeName))) + if err := g.client.Delete(ctx, key); err != nil { + return fmt.Errorf("deleting node from kvstore: %w", err) + } + + g.logger.Info("Successfully deleted stale node entry from kvstore", logfields.NodeName, nodeName) + health.OK("Stale node entry GCed") + return nil + }) { + } + + return nil +} + +func (g *gc) processNextWorkItem(handler func(nodeName nodeName) error) bool { + nodeName, quit := g.queue.Get() + if quit { + return false + } + + defer g.queue.Done(nodeName) + + err := handler(nodeName) + if err == nil { + if g.queue.NumRequeues(nodeName) > 0 { + g.logger.Info("Successfully reconciled node after retries", logfields.NodeName, nodeName) + } + g.queue.Forget(nodeName) + return true + } + + const silentRetries = 5 + log := g.logger.Info + if g.queue.NumRequeues(nodeName) >= silentRetries { + log = g.logger.Warn + } + + log("Failed reconciling node, will retry", + logfields.Error, err, + logfields.NodeName, nodeName, + ) + + g.queue.AddRateLimited(nodeName) + return true +} + +type observer struct { + queue workqueue.TypedRateLimitingInterface[nodeName] +} + +func (o *observer) OnUpdate(key store.Key) { + // Add the entry after a delay, as an extra safety measure, in addition to + // checking whether there's no Cilium pod running on that node, to prevent + // deleting newly created entries due to race conditions. 
+ o.queue.AddAfter(nodeName(key.(*nodeTypes.Node).Name), kvstoreUpsertQueueDelay) +} + +func (o *observer) OnDelete(store.NamedKey) {} diff --git a/operator/pkg/kvstore/nodesgc/script_test.go b/operator/pkg/kvstore/nodesgc/script_test.go new file mode 100644 index 0000000000000..586bc5088afec --- /dev/null +++ b/operator/pkg/kvstore/nodesgc/script_test.go @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package nodesgc + +import ( + "context" + "flag" + "log/slog" + "maps" + "testing" + + uhive "github.com/cilium/hive" + "github.com/cilium/hive/cell" + "github.com/cilium/hive/hivetest" + "github.com/cilium/hive/script" + "github.com/cilium/hive/script/scripttest" + "github.com/cilium/statedb" + "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/client-go/util/workqueue" + + operatorK8s "github.com/cilium/cilium/operator/k8s" + operatorOption "github.com/cilium/cilium/operator/option" + cmtypes "github.com/cilium/cilium/pkg/clustermesh/types" + "github.com/cilium/cilium/pkg/hive" + k8sClient "github.com/cilium/cilium/pkg/k8s/client/testutils" + k8sTestutils "github.com/cilium/cilium/pkg/k8s/testutils" + "github.com/cilium/cilium/pkg/k8s/version" + "github.com/cilium/cilium/pkg/kvstore" + "github.com/cilium/cilium/pkg/kvstore/store" + "github.com/cilium/cilium/pkg/logging" + "github.com/cilium/cilium/pkg/testutils" + "github.com/cilium/cilium/pkg/time" +) + +var debug = flag.Bool("debug", false, "Enable debug logging") + +func TestScript(t *testing.T) { + defer testutils.GoleakVerifyNone(t) + + version.Force(k8sTestutils.DefaultVersion) + + var opts []hivetest.LogOption + if *debug { + opts = append(opts, hivetest.LogLevel(slog.LevelDebug)) + logging.SetLogLevelToDebug() + } + log := hivetest.Logger(t, opts...) + + // Override the settings for testing purposes + wqRateLimiter = workqueue.NewTypedItemExponentialFailureRateLimiter[nodeName](10*time.Millisecond, 10*time.Millisecond) + kvstoreUpsertQueueDelay = 0 * time.Second + operatorOption.Config.CiliumPodLabels = "k8s-app=cilium" + + setup := func(t testing.TB, args []string) *script.Engine { + h := hive.New( + cell.Config(cmtypes.DefaultClusterInfo), + + cell.Provide( + func() store.Factory { return store.NewFactory(hivetest.Logger(t), store.MetricsProvider()) }, + + func(db *statedb.DB) (kvstore.Client, uhive.ScriptCmdsOut) { + client := kvstore.NewInMemoryClient(db, "__local__") + return client, uhive.NewScriptCmds(kvstore.Commands(client)) + }, + ), + + k8sClient.FakeClientCell(), + operatorK8s.ResourcesCell, + + Cell, + ) + + flags := pflag.NewFlagSet("", pflag.ContinueOnError) + h.RegisterFlags(flags) + + // Parse the shebang arguments in the script. 
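processNextWorkItem above follows the standard rate-limited workqueue loop: Forget on success resets the per-item backoff, AddRateLimited re-enqueues with exponential delay on failure, and the loop terminates once the queue is shut down (here via ShutDownWithDrain when the CiliumNode event stream closes). A bare-bones sketch of that loop, with handle standing in for the GC callback:

package sketch

import "k8s.io/client-go/util/workqueue"

// drain processes items until the queue is shut down, retrying failed items
// with the queue's rate limiter.
func drain(queue workqueue.TypedRateLimitingInterface[string], handle func(string) error) {
	for {
		item, shutdown := queue.Get()
		if shutdown {
			return
		}
		if err := handle(item); err != nil {
			queue.Done(item)
			queue.AddRateLimited(item) // retry later with exponential backoff
			continue
		}
		queue.Done(item)
		queue.Forget(item) // success: reset the per-item backoff
	}
}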
+ require.NoError(t, flags.Parse(args), "flags.Parse") + + t.Cleanup(func() { + assert.NoError(t, h.Stop(log, context.Background())) + }) + + cmds, err := h.ScriptCommands(log) + require.NoError(t, err, "ScriptCommands") + maps.Insert(cmds, maps.All(script.DefaultCmds())) + + return &script.Engine{ + Cmds: cmds, + RetryInterval: 10 * time.Millisecond, + } + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + + scripttest.Test(t, + ctx, + setup, + []string{}, + "testdata/*.txtar") +} diff --git a/operator/pkg/kvstore/nodesgc/testdata/disabled.txtar b/operator/pkg/kvstore/nodesgc/testdata/disabled.txtar new file mode 100644 index 0000000000000..09d85451ed0ac --- /dev/null +++ b/operator/pkg/kvstore/nodesgc/testdata/disabled.txtar @@ -0,0 +1,42 @@ +#! --cluster-name=foo --synchronize-k8s-nodes=false + +# Initialize the kvstore state with a bunch of nodes. +kvstore/update cilium/state/nodes/v1/foo/node-1 node-1.json +kvstore/update cilium/state/nodes/v1/foo/node-2 node-2.json +kvstore/update cilium/state/nodes/v1/foo/node-3 node-3.json + +# Initialize the kubernetes state with a bunch of CiliumNodes. +k8s/add cilium-node-1.yaml + +hive/start + +# Wait for a little bit for things to settle. +sleep 500ms + +# Assert that nothing got touched, as GC is disabled. +kvstore/list --keys-only cilium/state/nodes keys.actual +cmp keys.actual keys.expected + +### + +-- node-1.json -- +{ "cluster": "foo", "name": "node-1" } + +-- node-2.json -- +{ "cluster": "foo", "name": "node-2" } + +-- node-3.json -- +{ "cluster": "foo", "name": "node-3" } + + +-- cilium-node-1.yaml -- +apiVersion: cilium.io/v2 +kind: CiliumNode +metadata: + name: node-1 + + +-- keys.expected -- +# cilium/state/nodes/v1/foo/node-1 +# cilium/state/nodes/v1/foo/node-2 +# cilium/state/nodes/v1/foo/node-3 diff --git a/operator/pkg/kvstore/nodesgc/testdata/enabled.txtar b/operator/pkg/kvstore/nodesgc/testdata/enabled.txtar new file mode 100644 index 0000000000000..2d13a5bada546 --- /dev/null +++ b/operator/pkg/kvstore/nodesgc/testdata/enabled.txtar @@ -0,0 +1,178 @@ +#! --cluster-name=foo + +# Initialize the kvstore state with a bunch of nodes. +kvstore/update cilium/state/nodes/v1/foo/node-1 node-1.json +kvstore/update cilium/state/nodes/v1/foo/node-2 node-2.json +kvstore/update cilium/state/nodes/v1/foo/node-3 node-3.json +kvstore/update cilium/state/nodes/v1/other/cluster node-other-cluster.json + +# Initialize the kubernetes state with a bunch of CiliumNodes and pods. +k8s/add cilium-node-1.yaml +k8s/add cilium-node-2.yaml +k8s/add cilium-node-4.yaml + +k8s/add pod-node-1a.yaml +k8s/add pod-node-1b.yaml +k8s/add pod-node-3a.yaml +k8s/add pod-node-5a.yaml + +hive/start + +# Assert that the nodes for which there's no corresponding CiliumNode are deleted. +# Entries for other clusters should not be touched. +kvstore/list --keys-only cilium/state/nodes keys.actual +* cmp keys.actual keys.expected.v1 + +# Creating a kvstore entry for a non-existing CiliumNode should cause it to be +# immediately deleted (as we are setting the delay to 0 for testing purposes). +kvstore/update cilium/state/nodes/v1/foo/node-3 node-3.json + +# Wait for garbage collection. +kvstore/list --keys-only cilium/state/nodes keys.actual +* cmp keys.actual keys.expected.v1 + +# Creating a kvstore entry for an existing CiliumNode should not trigger GC. +kvstore/update cilium/state/nodes/v1/foo/node-4 node-4.json + +# Wait for a bit of time so that things can settle, and ensure that the entry +# was actually not deleted. 
+sleep 50ms + +kvstore/list --keys-only cilium/state/nodes keys.actual +cmp keys.actual keys.expected.v2 + +# Deleting a CiliumNode should trigger the GC of the corresponding kvstore entry. +k8s/delete cilium-node-2.yaml + +# Wait for garbage collection. +kvstore/list --keys-only cilium/state/nodes keys.actual +* cmp keys.actual keys.expected.v3 + +# Deleting a CiliumNode should not trigger the GC of the corresponding kvstore +# entry if a Cilium pod is still running. +k8s/delete cilium-node-1.yaml + +# Wait for a bit of time so that things can settle, and ensure that the entry +# was actually not deleted. +sleep 50ms + +kvstore/list --keys-only cilium/state/nodes keys.actual +cmp keys.actual keys.expected.v3 + +# Deleting the pod should now cause the entry to be deleted. +k8s/delete pod-node-1a.yaml + +# Wait for garbage collection. +kvstore/list --keys-only cilium/state/nodes keys.actual +* cmp keys.actual keys.expected.v4 + +# Creating a kvstore entry for a node where Cilium is running should not trigger GC. +kvstore/update cilium/state/nodes/v1/foo/node-5 node-5.json + +# Wait for a bit of time so that things can settle, and ensure that the entry +# was actually not deleted. +sleep 50ms + +kvstore/list --keys-only cilium/state/nodes keys.actual +cmp keys.actual keys.expected.v5 + +### + +-- node-1.json -- +{ "cluster": "foo", "name": "node-1" } + +-- node-2.json -- +{ "cluster": "foo", "name": "node-2" } + +-- node-3.json -- +{ "cluster": "foo", "name": "node-3" } + +-- node-4.json -- +{ "cluster": "foo", "name": "node-4" } + +-- node-5.json -- +{ "cluster": "foo", "name": "node-5" } + +-- node-other-cluster.json -- +{ "cluster": "other", "name": "cluster" } + + +-- cilium-node-1.yaml -- +apiVersion: cilium.io/v2 +kind: CiliumNode +metadata: + name: node-1 + +-- cilium-node-2.yaml -- +apiVersion: cilium.io/v2 +kind: CiliumNode +metadata: + name: node-2 + +-- cilium-node-4.yaml -- +apiVersion: cilium.io/v2 +kind: CiliumNode +metadata: + name: node-4 + +-- pod-node-1a.yaml -- +apiVersion: v1 +kind: Pod +metadata: + name: cilium-42lwq + labels: + k8s-app: cilium + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium +spec: + nodeName: node-1 + +-- pod-node-1b.yaml -- +apiVersion: v1 +kind: Pod +metadata: + name: foobar +spec: + nodeName: node-1 + +-- pod-node-3a.yaml -- +apiVersion: v1 +kind: Pod +metadata: + name: qux + labels: + k8s-app: other +spec: + nodeName: node-3 + +-- pod-node-5a.yaml -- +apiVersion: v1 +kind: Pod +metadata: + name: cilium-cl4dn + labels: + k8s-app: cilium +spec: + nodeName: node-5 + + +-- keys.expected.v1 -- +# cilium/state/nodes/v1/foo/node-1 +# cilium/state/nodes/v1/foo/node-2 +# cilium/state/nodes/v1/other/cluster +-- keys.expected.v2 -- +# cilium/state/nodes/v1/foo/node-1 +# cilium/state/nodes/v1/foo/node-2 +# cilium/state/nodes/v1/foo/node-4 +# cilium/state/nodes/v1/other/cluster +-- keys.expected.v3 -- +# cilium/state/nodes/v1/foo/node-1 +# cilium/state/nodes/v1/foo/node-4 +# cilium/state/nodes/v1/other/cluster +-- keys.expected.v4 -- +# cilium/state/nodes/v1/foo/node-4 +# cilium/state/nodes/v1/other/cluster +-- keys.expected.v5 -- +# cilium/state/nodes/v1/foo/node-4 +# cilium/state/nodes/v1/foo/node-5 +# cilium/state/nodes/v1/other/cluster diff --git a/operator/pkg/lbipam/lbipam.go b/operator/pkg/lbipam/lbipam.go index b3e69d6dafecf..82cb6211a4465 100644 --- a/operator/pkg/lbipam/lbipam.go +++ b/operator/pkg/lbipam/lbipam.go @@ -1679,17 +1679,15 @@ func (ipam *LBIPAM) updatePoolCounts(pool *cilium_api_v2.CiliumLoadBalancerIPPoo 
totalCounts.Used += used } - if ipam.setPoolCondition(pool, ciliumPoolIPsTotalCondition, meta_v1.ConditionUnknown, "noreason", totalCounts.Total.String()) || - ipam.setPoolCondition(pool, ciliumPoolIPsAvailableCondition, meta_v1.ConditionUnknown, "noreason", totalCounts.Available.String()) || - ipam.setPoolCondition(pool, ciliumPoolIPsUsedCondition, meta_v1.ConditionUnknown, "noreason", strconv.FormatUint(totalCounts.Used, 10)) { - modifiedPoolStatus = true - } + totalChanged := ipam.setPoolCondition(pool, ciliumPoolIPsTotalCondition, meta_v1.ConditionUnknown, "noreason", totalCounts.Total.String()) + availableChanged := ipam.setPoolCondition(pool, ciliumPoolIPsAvailableCondition, meta_v1.ConditionUnknown, "noreason", totalCounts.Available.String()) + usedChanged := ipam.setPoolCondition(pool, ciliumPoolIPsUsedCondition, meta_v1.ConditionUnknown, "noreason", strconv.FormatUint(totalCounts.Used, 10)) available, _ := new(big.Float).SetInt(totalCounts.Available).Float64() ipam.metrics.AvailableIPs.WithLabelValues(pool.Name).Set(available) ipam.metrics.UsedIPs.WithLabelValues(pool.Name).Set(float64(totalCounts.Used)) - return modifiedPoolStatus + return totalChanged || availableChanged || usedChanged } func (ipam *LBIPAM) setPoolCondition( diff --git a/operator/pkg/lbipam/lbipam_test.go b/operator/pkg/lbipam/lbipam_test.go index 4422e1d726dc7..4cff2cc71e4cc 100644 --- a/operator/pkg/lbipam/lbipam_test.go +++ b/operator/pkg/lbipam/lbipam_test.go @@ -824,6 +824,9 @@ func TestServiceDelete(t *testing.T) { }, }, } + ipsUsed := getPoolStatusCount(fixture.GetPool("pool-a"), ciliumPoolIPsUsedCondition) + require.Equal(t, "0", ipsUsed) + fixture.UpsertSvc(t, svcA) svcA = fixture.GetSvc("default", "service-a") @@ -834,6 +837,8 @@ func TestServiceDelete(t *testing.T) { if net.ParseIP(svcA.Status.LoadBalancer.Ingress[0].IP).To4() == nil { t.Error("Expected service to receive a IPv4 address") } + ipsUsed = getPoolStatusCount(fixture.GetPool("pool-a"), ciliumPoolIPsUsedCondition) + require.Equal(t, "1", ipsUsed) svcIP := svcA.Status.LoadBalancer.Ingress[0].IP @@ -846,6 +851,9 @@ func TestServiceDelete(t *testing.T) { if _, has := fixture.lbipam.rangesStore.ranges[0].alloc.Get(netip.MustParseAddr(svcIP)); has { t.Fatal("Service IP hasn't been released") } + ipsUsed = getPoolStatusCount(fixture.GetPool("pool-a"), ciliumPoolIPsUsedCondition) + require.Equal(t, "0", ipsUsed) + } // TestReallocOnInit tests the edge case where an existing service has an IP assigned for which there is no IP Pool. 
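The updatePoolCounts change above fixes a short-circuit bug: with the old a() || b() || c() chain, the Available and Used conditions were no longer updated once an earlier setPoolCondition call reported a change. The fix evaluates all three calls unconditionally and only then ORs the results, which is what the new status-counter assertions in TestServiceDelete and TestPoolShrink exercise. A tiny self-contained illustration of the pitfall:

package main

import "fmt"

// update records that it ran (the side effect we rely on) and reports a change.
func update(name string, ran *[]string) bool {
	*ran = append(*ran, name)
	return true
}

func main() {
	var ran []string
	_ = update("total", &ran) || update("available", &ran) || update("used", &ran)
	fmt.Println(ran) // [total]: "available" and "used" were never evaluated

	ran = nil
	a := update("total", &ran)
	b := update("available", &ran)
	c := update("used", &ran)
	fmt.Println(ran, a || b || c) // [total available used] true
}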
@@ -2856,3 +2864,69 @@ func TestLBIPAMRestartOnFullPool(t *testing.T) { } } } + +func TestPoolShrink(t *testing.T) { + // Pool with two blocks + poolA := mkPool(poolAUID, "pool-a", []string{"10.0.10.0/24", "10.0.11.0/24"}) + fixture := mkTestFixture(t, true, true) + fixture.UpsertPool(t, poolA) + + // Check initial state + p := fixture.GetPool("pool-a") + totalCount := getPoolStatusCount(p, ciliumPoolIPsTotalCondition) + require.Equal(t, "512", totalCount, "IPsTotal should be 512 initially") + availableCount := getPoolStatusCount(p, ciliumPoolIPsAvailableCondition) + require.Equal(t, "512", availableCount, "IPsAvailable should be 512 initially") + usedCount := getPoolStatusCount(p, ciliumPoolIPsUsedCondition) + require.Equal(t, "0", usedCount, "IPsUsed should be 0 initially") + + // Create service to allocate one IP + svcA := &slim_core_v1.Service{ + ObjectMeta: slim_meta_v1.ObjectMeta{ + Name: "service-a", + Namespace: "default", + UID: serviceAUID, + }, + Spec: slim_core_v1.ServiceSpec{ + Type: slim_core_v1.ServiceTypeLoadBalancer, + IPFamilies: []slim_core_v1.IPFamily{ + slim_core_v1.IPv4Protocol, + }, + }, + } + fixture.UpsertSvc(t, svcA) + svcA = fixture.GetSvc("default", "service-a") + require.Len(t, svcA.Status.LoadBalancer.Ingress, 1, "Expected service to receive exactly one ingress IP") + + p = fixture.GetPool("pool-a") + require.Equal(t, "512", getPoolStatusCount(p, ciliumPoolIPsTotalCondition), "IPsTotal should be 512") + require.Equal(t, "511", getPoolStatusCount(p, ciliumPoolIPsAvailableCondition), "IPsAvailable should be 511") + require.Equal(t, "1", getPoolStatusCount(p, ciliumPoolIPsUsedCondition), "IPsUsed should be 1") + + svcIP, err := netip.ParseAddr(svcA.Status.LoadBalancer.Ingress[0].IP) + require.NoError(t, err) + blockToKeep := "10.0.10.0/24" + if !netip.MustParsePrefix(blockToKeep).Contains(svcIP) { + blockToKeep = "10.0.11.0/24" + } + + poolA = fixture.GetPool("pool-a") + poolA.Spec.Blocks = []cilium_api_v2.CiliumLoadBalancerIPPoolIPBlock{ + {Cidr: cilium_api_v2.IPv4orIPv6CIDR(blockToKeep)}, + } + fixture.UpsertPool(t, poolA) + + p = fixture.GetPool("pool-a") + require.Equal(t, "256", getPoolStatusCount(p, ciliumPoolIPsTotalCondition), "IPsTotal should be 256") + require.Equal(t, "255", getPoolStatusCount(p, ciliumPoolIPsAvailableCondition), "IPsAvailable should be 255") + require.Equal(t, "1", getPoolStatusCount(p, ciliumPoolIPsUsedCondition), "IPsUsed should be 1") +} + +func getPoolStatusCount(pool *cilium_api_v2.CiliumLoadBalancerIPPool, condType string) string { + for _, cond := range pool.Status.Conditions { + if cond.Type == condType { + return cond.Message + } + } + return "" +} diff --git a/operator/watchers/cilium_node_gc.go b/operator/watchers/cilium_node_gc.go index e660279bb2a70..d03aa09515fc2 100644 --- a/operator/watchers/cilium_node_gc.go +++ b/operator/watchers/cilium_node_gc.go @@ -5,7 +5,6 @@ package watchers import ( "context" - "fmt" "log/slog" "strings" "sync" @@ -13,13 +12,13 @@ import ( k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" "github.com/cilium/cilium/pkg/controller" cilium_v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" k8sClient "github.com/cilium/cilium/pkg/k8s/client" ciliumv2 "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2" + "github.com/cilium/cilium/pkg/k8s/resource" "github.com/cilium/cilium/pkg/lock" "github.com/cilium/cilium/pkg/logging/logfields" ) @@ -67,7 +66,7 @@ func 
(c *ciliumNodeGCCandidate) Delete(nodeName string) { } // RunCiliumNodeGC performs garbage collector for cilium node resource -func RunCiliumNodeGC(ctx context.Context, wg *sync.WaitGroup, clientset k8sClient.Clientset, ciliumNodeStore cache.Store, interval time.Duration, logger *slog.Logger, +func RunCiliumNodeGC(ctx context.Context, wg *sync.WaitGroup, clientset k8sClient.Clientset, ciliumNodes resource.Resource[*cilium_v2.CiliumNode], interval time.Duration, logger *slog.Logger, mp workqueue.MetricsProvider) { nodesInit(wg, clientset.Slim(), ctx.Done(), mp) @@ -78,6 +77,11 @@ func RunCiliumNodeGC(ctx context.Context, wg *sync.WaitGroup, clientset k8sClien return } + ciliumNodeStore, err := ciliumNodes.Store(ctx) + if err != nil { + return + } + logger.InfoContext(ctx, "Starting to garbage collect stale CiliumNode custom resources") candidateStore := newCiliumNodeGCCandidate() @@ -102,9 +106,13 @@ func RunCiliumNodeGC(ctx context.Context, wg *sync.WaitGroup, clientset k8sClien }() } -func performCiliumNodeGC(ctx context.Context, client ciliumv2.CiliumNodeInterface, ciliumNodeStore cache.Store, +func performCiliumNodeGC(ctx context.Context, client ciliumv2.CiliumNodeInterface, ciliumNodeStore resource.Store[*cilium_v2.CiliumNode], nodeGetter slimNodeGetter, interval time.Duration, candidateStore *ciliumNodeGCCandidate, logger *slog.Logger) error { - for _, nodeName := range ciliumNodeStore.ListKeys() { + iter := ciliumNodeStore.IterKeys() + for iter.Next() { + key := iter.Key() + nodeName := key.Name + scopedLog := logger.With(logfields.NodeName, nodeName) _, err := nodeGetter.GetK8sSlimNode(nodeName) if err == nil { @@ -117,18 +125,12 @@ func performCiliumNodeGC(ctx context.Context, client ciliumv2.CiliumNodeInterfac return err } - obj, _, err := ciliumNodeStore.GetByKey(nodeName) + cn, _, err := ciliumNodeStore.GetByKey(key) if err != nil { scopedLog.ErrorContext(ctx, "Unable to fetch CiliumNode from store", logfields.Error, err) return err } - cn, ok := obj.(*cilium_v2.CiliumNode) - if !ok { - scopedLog.ErrorContext(ctx, fmt.Sprintf("Object stored in store is not *cilium_v2.CiliumNode but %T", obj)) - return err - } - // if there is owner references, let k8s handle garbage collection if len(cn.GetOwnerReferences()) > 0 { continue diff --git a/operator/watchers/cilium_node_gc_test.go b/operator/watchers/cilium_node_gc_test.go index 917bdd93dd885..7fba0ae807c55 100644 --- a/operator/watchers/cilium_node_gc_test.go +++ b/operator/watchers/cilium_node_gc_test.go @@ -11,34 +11,30 @@ import ( "github.com/stretchr/testify/assert" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/tools/cache" + operatorK8s "github.com/cilium/cilium/operator/k8s" v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" - "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake" + k8sClient "github.com/cilium/cilium/pkg/k8s/client/testutils" slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" ) func Test_performCiliumNodeGC(t *testing.T) { - cns := []runtime.Object{ - &v2.CiliumNode{ + cns := []*v2.CiliumNode{ + { ObjectMeta: metav1.ObjectMeta{ Name: "valid-node", }, - }, - &v2.CiliumNode{ + }, { ObjectMeta: metav1.ObjectMeta{ Name: "invalid-node", }, - }, - &v2.CiliumNode{ + }, { ObjectMeta: metav1.ObjectMeta{ Name: "invalid-node-with-owner-ref", OwnerReferences: []metav1.OwnerReference{{}}, }, - }, - &v2.CiliumNode{ + }, { ObjectMeta: 
metav1.ObjectMeta{ Name: "invalid-node-with-annotation", Annotations: map[string]string{skipGCAnnotationKey: "true"}, @@ -46,12 +42,19 @@ func Test_performCiliumNodeGC(t *testing.T) { }, } - fcn := fake.NewSimpleClientset(cns...).CiliumV2().CiliumNodes() - fCNStore := cache.NewStore(cache.MetaNamespaceKeyFunc) + _, cs := k8sClient.NewFakeClientset(hivetest.Logger(t)) + fcn := cs.CiliumV2().CiliumNodes() for _, cn := range cns { - fCNStore.Add(cn) + _, err := fcn.Create(t.Context(), cn, metav1.CreateOptions{}) + assert.NoError(t, err) } + ciliumNodes, err := operatorK8s.CiliumNodeResource(hivetest.Lifecycle(t), cs, nil) + assert.NoError(t, err) + + store, err := ciliumNodes.Store(t.Context()) + assert.NoError(t, err) + interval := time.Nanosecond fng := &fakeNodeGetter{ OnGetK8sSlimNode: func(nodeName string) (*slim_corev1.Node, error) { @@ -65,7 +68,7 @@ func Test_performCiliumNodeGC(t *testing.T) { candidateStore := newCiliumNodeGCCandidate() // check if the invalid node is added to GC candidate - err := performCiliumNodeGC(t.Context(), fcn, fCNStore, fng, interval, candidateStore, hivetest.Logger(t)) + err = performCiliumNodeGC(t.Context(), fcn, store, fng, interval, candidateStore, hivetest.Logger(t)) assert.NoError(t, err) assert.Len(t, candidateStore.nodesToRemove, 1) _, exists := candidateStore.nodesToRemove["invalid-node"] @@ -73,7 +76,7 @@ func Test_performCiliumNodeGC(t *testing.T) { // check if the invalid node is actually GC-ed time.Sleep(interval) - err = performCiliumNodeGC(t.Context(), fcn, fCNStore, fng, interval, candidateStore, hivetest.Logger(t)) + err = performCiliumNodeGC(t.Context(), fcn, store, fng, interval, candidateStore, hivetest.Logger(t)) assert.NoError(t, err) assert.Empty(t, candidateStore.nodesToRemove) _, exists = candidateStore.nodesToRemove["invalid-node"] diff --git a/pkg/annotation/k8s.go b/pkg/annotation/k8s.go index bc3fee13948fe..f23df4dc0119b 100644 --- a/pkg/annotation/k8s.go +++ b/pkg/annotation/k8s.go @@ -218,6 +218,8 @@ const ( CECInjectCiliumFilters = CECPrefix + "/inject-cilium-filters" CECIsL7LB = CECPrefix + "/is-l7lb" CECUseOriginalSourceAddress = CECPrefix + "/use-original-source-address" + + NoTrackHostPorts = NetworkPrefix + "/no-track-host-ports" ) // CiliumPrefixRegex is a regex matching Cilium specific annotations. diff --git a/pkg/aws/eni/node.go b/pkg/aws/eni/node.go index 57f2648b8971e..b13a52760c3ca 100644 --- a/pkg/aws/eni/node.go +++ b/pkg/aws/eni/node.go @@ -958,7 +958,7 @@ func (n *Node) getEffectiveIPLimits(eni *eniTypes.ENI, limits int) (leftoverPref // findSubnetInSameRouteTableWithNodeSubnet returns the subnet with the most addresses // that is in the same route table as the node's subnet to make sure the pod traffic -// leaving secondary interfaces will be routed as the primary interface. +// leaving secondary interfaces is routed in the same way as the primary interface. 
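For context on the CiliumNode GC change above: the watcher now consumes a typed resource.Resource[*cilium_v2.CiliumNode], obtains its store via Store(ctx), and walks keys with IterKeys(), so GetByKey returns a *cilium_v2.CiliumNode directly and the old interface{} type assertion disappears. A minimal sketch of that iteration pattern, assuming the imports used in the hunk above; the helper name listStaleCandidates (and its package clause) is illustrative only:

package watchers

import (
	cilium_v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
	"github.com/cilium/cilium/pkg/k8s/resource"
)

// listStaleCandidates walks a typed store the same way the GC loop above does.
func listStaleCandidates(store resource.Store[*cilium_v2.CiliumNode]) ([]string, error) {
	var names []string
	iter := store.IterKeys()
	for iter.Next() {
		key := iter.Key()
		cn, exists, err := store.GetByKey(key)
		if err != nil {
			return nil, err
		}
		if !exists {
			continue
		}
		// Typed access: no cast from an untyped cache.Store object is needed.
		if len(cn.GetOwnerReferences()) == 0 {
			names = append(names, key.Name)
		}
	}
	return names, nil
}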
func (n *Node) findSubnetInSameRouteTableWithNodeSubnet() *ipamTypes.Subnet { n.mutex.RLock() defer n.mutex.RUnlock() @@ -975,15 +975,13 @@ func (n *Node) findSubnetInSameRouteTableWithNodeSubnet() *ipamTypes.Subnet { for _, routeTable := range n.manager.routeTables { if _, ok := routeTable.Subnets[nodeSubnetID]; ok && routeTable.VirtualNetworkID == n.k8sObj.Spec.ENI.VpcID { - for _, subnetID := range n.k8sObj.Spec.ENI.SubnetIDs { + for subnetID := range routeTable.Subnets { if subnetID == nodeSubnetID { continue } - if _, ok := routeTable.Subnets[subnetID]; ok { - subnet := n.manager.subnets[subnetID] - if bestSubnet == nil || subnet.AvailableAddresses > bestSubnet.AvailableAddresses { - bestSubnet = subnet - } + subnet := n.manager.subnets[subnetID] + if (bestSubnet == nil || subnet.AvailableAddresses > bestSubnet.AvailableAddresses) && subnet.AvailabilityZone == n.k8sObj.Spec.ENI.AvailabilityZone { + bestSubnet = subnet } } } @@ -993,7 +991,7 @@ func (n *Node) findSubnetInSameRouteTableWithNodeSubnet() *ipamTypes.Subnet { } // checkSubnetInSameRouteTableWithNodeSubnet checks if the given subnet is in the same route table as the node's subnet -// to make sure the pod traffic leaving secondary interfaces will be routed as the primary interface. +// to make sure the pod traffic leaving secondary interfaces is routed in the same way as the primary interface. func (n *Node) checkSubnetInSameRouteTableWithNodeSubnet(subnet *ipamTypes.Subnet) bool { n.mutex.RLock() defer n.mutex.RUnlock() diff --git a/pkg/aws/eni/node_test.go b/pkg/aws/eni/node_test.go index b697b6f1883c1..1fb9135c3ad13 100644 --- a/pkg/aws/eni/node_test.go +++ b/pkg/aws/eni/node_test.go @@ -53,13 +53,14 @@ func Test_findSubnetInSameRouteTableWithNodeSubnet(t *testing.T) { Subnets: map[string]struct{}{ "subnet-1": {}, "subnet-2": {}, + "subnet-3": {}, }, }, "rt-2": &ipamTypes.RouteTable{ ID: "rt-2", VirtualNetworkID: "vpc-2", Subnets: map[string]struct{}{ - "subnet-3": {}, + "subnet-4": {}, }, }, } @@ -68,9 +69,9 @@ func Test_findSubnetInSameRouteTableWithNodeSubnet(t *testing.T) { k8sObj: &v2.CiliumNode{ Spec: v2.NodeSpec{ ENI: types.ENISpec{ - VpcID: "vpc-1", - NodeSubnetID: "subnet-1", - SubnetIDs: []string{"subnet-1", "subnet-2", "subnet-3"}, + VpcID: "vpc-1", + NodeSubnetID: "subnet-1", + AvailabilityZone: "us-east-1a", }, }, }, @@ -79,14 +80,22 @@ func Test_findSubnetInSameRouteTableWithNodeSubnet(t *testing.T) { "subnet-1": { ID: "subnet-1", AvailableAddresses: 10, + AvailabilityZone: "us-east-1a", }, "subnet-2": { ID: "subnet-2", AvailableAddresses: 20, + AvailabilityZone: "us-east-1a", }, "subnet-3": { ID: "subnet-3", + AvailableAddresses: 25, + AvailabilityZone: "us-east-1b", + }, + "subnet-4": { + ID: "subnet-4", AvailableAddresses: 15, + AvailabilityZone: "us-east-1a", }, }, routeTables: routeTableMap, diff --git a/pkg/bgpv1/manager/reconciler/preflight_test.go b/pkg/bgpv1/manager/reconciler/preflight_test.go index 1c57af21e4b5b..35838f4e42db5 100644 --- a/pkg/bgpv1/manager/reconciler/preflight_test.go +++ b/pkg/bgpv1/manager/reconciler/preflight_test.go @@ -22,6 +22,7 @@ import ( slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" "github.com/cilium/cilium/pkg/option" + "github.com/cilium/cilium/pkg/svcrouteconfig" ) // We use similar local listen ports as the tests in the pkg/bgpv1/test package. 
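The rewritten findSubnetInSameRouteTableWithNodeSubnet above iterates the subnets of the node's route table directly and adds an availability-zone requirement: a secondary-interface subnet is only eligible if it shares both the route table and the AZ of the node's subnet, and among those the one with the most available addresses wins (subnet-2 in the updated test, not the larger subnet-3 in another AZ). A self-contained sketch of just that selection rule, with a simplified type standing in for ipamTypes.Subnet:

package main

import "fmt"

type subnet struct {
	ID                 string
	AvailabilityZone   string
	AvailableAddresses int
}

// bestSecondarySubnet picks among subnets already known to share the node's
// route table: skip the node subnet itself, require the node's AZ, and prefer
// the most available addresses.
func bestSecondarySubnet(candidates []subnet, nodeSubnetID, nodeAZ string) *subnet {
	var best *subnet
	for i := range candidates {
		s := &candidates[i]
		if s.ID == nodeSubnetID || s.AvailabilityZone != nodeAZ {
			continue
		}
		if best == nil || s.AvailableAddresses > best.AvailableAddresses {
			best = s
		}
	}
	return best
}

func main() {
	candidates := []subnet{
		{"subnet-1", "us-east-1a", 10}, // node subnet, skipped
		{"subnet-2", "us-east-1a", 20},
		{"subnet-3", "us-east-1b", 25}, // wrong AZ, skipped despite more addresses
	}
	fmt.Println(bestSecondarySubnet(candidates, "subnet-1", "us-east-1a").ID) // subnet-2
}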
@@ -249,7 +250,7 @@ func TestReconcileAfterServerReinit(t *testing.T) { require.NoError(t, err) diffstore.Upsert(obj) - reconciler := NewServiceReconciler(diffstore, epDiffStore) + reconciler := NewServiceReconciler(diffstore, epDiffStore, svcrouteconfig.DefaultConfig) err = reconciler.Reconciler.Reconcile(context.Background(), params) require.NoError(t, err) @@ -271,7 +272,7 @@ func TestReconcileAfterServerReinit(t *testing.T) { require.NoError(t, err) // Update LB service - reconciler = NewServiceReconciler(diffstore, epDiffStore) + reconciler = NewServiceReconciler(diffstore, epDiffStore, svcrouteconfig.DefaultConfig) err = reconciler.Reconciler.Reconcile(context.Background(), params) require.NoError(t, err) } diff --git a/pkg/bgpv1/manager/reconciler/service.go b/pkg/bgpv1/manager/reconciler/service.go index 727437c12894a..2786d34e1e50d 100644 --- a/pkg/bgpv1/manager/reconciler/service.go +++ b/pkg/bgpv1/manager/reconciler/service.go @@ -25,6 +25,7 @@ import ( slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" "github.com/cilium/cilium/pkg/loadbalancer" ciliumslices "github.com/cilium/cilium/pkg/slices" + "github.com/cilium/cilium/pkg/svcrouteconfig" ) type LBServiceReconcilerOut struct { @@ -34,14 +35,15 @@ type LBServiceReconcilerOut struct { } type ServiceReconciler struct { - diffStore store.DiffStore[*slim_corev1.Service] - epDiffStore store.DiffStore[*k8s.Endpoints] + diffStore store.DiffStore[*slim_corev1.Service] + epDiffStore store.DiffStore[*k8s.Endpoints] + routesConfig svcrouteconfig.RoutesConfig } // LBServiceReconcilerMetadata keeps a map of services to the respective advertised Paths type LBServiceReconcilerMetadata map[resource.Key][]*types.Path -type localServices map[loadbalancer.ServiceName]struct{} +type routableServices map[loadbalancer.ServiceName]struct{} // pathReference holds reference information about an advertised path type pathReference struct { @@ -52,15 +54,16 @@ type pathReference struct { // pathReferencesMap holds path references of resources producing path advertisement, indexed by path's NLRI string type pathReferencesMap map[string]*pathReference -func NewServiceReconciler(diffStore store.DiffStore[*slim_corev1.Service], epDiffStore store.DiffStore[*k8s.Endpoints]) LBServiceReconcilerOut { +func NewServiceReconciler(diffStore store.DiffStore[*slim_corev1.Service], epDiffStore store.DiffStore[*k8s.Endpoints], routesConfig svcrouteconfig.RoutesConfig) LBServiceReconcilerOut { if diffStore == nil { return LBServiceReconcilerOut{} } return LBServiceReconcilerOut{ Reconciler: &ServiceReconciler{ - diffStore: diffStore, - epDiffStore: epDiffStore, + diffStore: diffStore, + epDiffStore: epDiffStore, + routesConfig: routesConfig, }, } } @@ -134,8 +137,8 @@ func (r *ServiceReconciler) requiresFullReconciliation(p ReconcileParams) bool { } // Populate locally available services used for externalTrafficPolicy=local handling -func (r *ServiceReconciler) populateLocalServices(localNodeName string) (localServices, error) { - ls := make(localServices) +func (r *ServiceReconciler) populateRoutableServices(localNodeName string) (routableServices, error) { + rs := make(routableServices) epList, err := r.epDiffStore.List() if err != nil { @@ -144,7 +147,7 @@ func (r *ServiceReconciler) populateLocalServices(localNodeName string) (localSe endpointsLoop: for _, eps := range epList { - _, exists, err := r.resolveSvcFromEndpoints(eps) + svc, exists, err := r.resolveSvcFromEndpoints(eps) if err != nil { // Cannot resolve service from endpoints. 
We have nothing to do here. continue @@ -155,25 +158,36 @@ endpointsLoop: continue } + if len(eps.Backends) == 0 && !r.routesConfig.EnableNoServiceEndpointsRoutable { + continue + } + svcID := eps.ServiceName - for _, be := range eps.Backends { - if !be.Conditions.IsTerminating() && be.NodeName == localNodeName { - // At least one endpoint is available on this node. We - // can make unavailable to available. - if _, found := ls[svcID]; !found { - ls[svcID] = struct{}{} + if svc.Spec.ExternalTrafficPolicy == slim_corev1.ServiceExternalTrafficPolicyLocal || + (svc.Spec.InternalTrafficPolicy != nil && *svc.Spec.InternalTrafficPolicy == slim_corev1.ServiceInternalTrafficPolicyLocal) { + for _, be := range eps.Backends { + if !be.Conditions.IsTerminating() && be.NodeName == localNodeName { + // At least one endpoint is available on this node. We + // can make unavailable to available. + if _, found := rs[svcID]; !found { + rs[svcID] = struct{}{} + } + continue endpointsLoop } - continue endpointsLoop } + } else { + // For eTP/iTP=Cluster any endpoint is valid + rs[svcID] = struct{}{} } + } - return ls, nil + return rs, nil } -func hasLocalEndpoints(svc *slim_corev1.Service, ls localServices) bool { - _, found := ls[loadbalancer.NewServiceName(svc.GetNamespace(), svc.GetName())] +func hasValidEndpoints(svc *slim_corev1.Service, rs routableServices) bool { + _, found := rs[loadbalancer.NewServiceName(svc.GetNamespace(), svc.GetName())] return found } @@ -184,12 +198,12 @@ func (r *ServiceReconciler) fullReconciliation(ctx context.Context, p ReconcileP if err != nil { return err } - ls, err := r.populateLocalServices(p.CiliumNode.Name) + rs, err := r.populateRoutableServices(p.CiliumNode.Name) if err != nil { return err } for _, svc := range toReconcile { - if err := r.reconcileService(ctx, p.CurrentServer, p.DesiredConfig, svc, ls, pathRefs); err != nil { + if err := r.reconcileService(ctx, p.CurrentServer, p.DesiredConfig, svc, rs, pathRefs); err != nil { return fmt.Errorf("failed to reconcile service %s/%s: %w", svc.Namespace, svc.Name, err) } } @@ -208,7 +222,7 @@ func (r *ServiceReconciler) svcDiffReconciliation(ctx context.Context, p Reconci if err != nil { return err } - ls, err := r.populateLocalServices(p.CiliumNode.Name) + ls, err := r.populateRoutableServices(p.CiliumNode.Name) if err != nil { return err } @@ -316,7 +330,7 @@ func (r *ServiceReconciler) diffReconciliationServiceList(sc *instance.ServerWit // svcDesiredRoutes determines which, if any routes should be announced for the given service. This determines the // desired state. -func (r *ServiceReconciler) svcDesiredRoutes(newc *v2alpha1api.CiliumBGPVirtualRouter, svc *slim_corev1.Service, ls localServices) ([]netip.Prefix, error) { +func (r *ServiceReconciler) svcDesiredRoutes(newc *v2alpha1api.CiliumBGPVirtualRouter, svc *slim_corev1.Service, rs routableServices) ([]netip.Prefix, error) { if newc.ServiceSelector == nil { // If the vRouter has no service selector, there are no desired routes. return nil, nil @@ -338,22 +352,21 @@ func (r *ServiceReconciler) svcDesiredRoutes(newc *v2alpha1api.CiliumBGPVirtualR for _, svcAdv := range newc.ServiceAdvertisements { switch svcAdv { case v2alpha1api.BGPLoadBalancerIPAddr: - desiredRoutes = append(desiredRoutes, r.lbSvcDesiredRoutes(svc, ls)...) + desiredRoutes = append(desiredRoutes, r.lbSvcDesiredRoutes(svc, rs)...) case v2alpha1api.BGPClusterIPAddr: - desiredRoutes = append(desiredRoutes, r.clusterIPDesiredRoutes(svc, ls)...) 
+ desiredRoutes = append(desiredRoutes, r.clusterIPDesiredRoutes(svc, rs)...) case v2alpha1api.BGPExternalIPAddr: - desiredRoutes = append(desiredRoutes, r.externalIPDesiredRoutes(svc, ls)...) + desiredRoutes = append(desiredRoutes, r.externalIPDesiredRoutes(svc, rs)...) } } return desiredRoutes, err } -func (r *ServiceReconciler) externalIPDesiredRoutes(svc *slim_corev1.Service, ls localServices) []netip.Prefix { +func (r *ServiceReconciler) externalIPDesiredRoutes(svc *slim_corev1.Service, rs routableServices) []netip.Prefix { var desiredRoutes []netip.Prefix // Ignore externalTrafficPolicy == Local && no local endpoints. - if svc.Spec.ExternalTrafficPolicy == slim_corev1.ServiceExternalTrafficPolicyLocal && - !hasLocalEndpoints(svc, ls) { + if !hasValidEndpoints(svc, rs) && (!r.routesConfig.EnableNoServiceEndpointsRoutable || svc.Spec.ExternalTrafficPolicy == slim_corev1.ServiceExternalTrafficPolicyLocal) { return desiredRoutes } for _, extIP := range svc.Spec.ExternalIPs { @@ -369,11 +382,10 @@ func (r *ServiceReconciler) externalIPDesiredRoutes(svc *slim_corev1.Service, ls return desiredRoutes } -func (r *ServiceReconciler) clusterIPDesiredRoutes(svc *slim_corev1.Service, ls localServices) []netip.Prefix { +func (r *ServiceReconciler) clusterIPDesiredRoutes(svc *slim_corev1.Service, rs routableServices) []netip.Prefix { var desiredRoutes []netip.Prefix // Ignore internalTrafficPolicy == Local && no local endpoints. - if svc.Spec.InternalTrafficPolicy != nil && *svc.Spec.InternalTrafficPolicy == slim_corev1.ServiceInternalTrafficPolicyLocal && - !hasLocalEndpoints(svc, ls) { + if !hasValidEndpoints(svc, rs) && (!r.routesConfig.EnableNoServiceEndpointsRoutable || svc.Spec.InternalTrafficPolicy != nil && *svc.Spec.InternalTrafficPolicy == slim_corev1.ServiceInternalTrafficPolicyLocal) { return desiredRoutes } if svc.Spec.ClusterIP == "" || len(svc.Spec.ClusterIPs) == 0 || svc.Spec.ClusterIP == corev1.ClusterIPNone { @@ -399,14 +411,13 @@ func (r *ServiceReconciler) clusterIPDesiredRoutes(svc *slim_corev1.Service, ls return desiredRoutes } -func (r *ServiceReconciler) lbSvcDesiredRoutes(svc *slim_corev1.Service, ls localServices) []netip.Prefix { +func (r *ServiceReconciler) lbSvcDesiredRoutes(svc *slim_corev1.Service, rs routableServices) []netip.Prefix { var desiredRoutes []netip.Prefix if svc.Spec.Type != slim_corev1.ServiceTypeLoadBalancer { return desiredRoutes } // Ignore externalTrafficPolicy == Local && no local endpoints. - if svc.Spec.ExternalTrafficPolicy == slim_corev1.ServiceExternalTrafficPolicyLocal && - !hasLocalEndpoints(svc, ls) { + if !hasValidEndpoints(svc, rs) && (!r.routesConfig.EnableNoServiceEndpointsRoutable || svc.Spec.ExternalTrafficPolicy == slim_corev1.ServiceExternalTrafficPolicyLocal) { return desiredRoutes } // Ignore service managed by an unsupported LB class. @@ -428,9 +439,9 @@ func (r *ServiceReconciler) lbSvcDesiredRoutes(svc *slim_corev1.Service, ls loca } // reconcileService gets the desired routes of a given service and makes sure that is what is being announced. 
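The v1 reconciler changes above replace the per-node "local services" set with a "routable services" set: a service becomes routable when its endpoints include at least one usable backend for its traffic policy (a non-terminating backend on this node for eTP/iTP=Local, any backend for Cluster), and with EnableNoServiceEndpointsRoutable set, Cluster-policy services stay routable even when the endpoint slice is empty. The externalIP, clusterIP and LoadBalancer route helpers then all gate on the same condition; a condensed sketch of that gate, with the booleans already extracted:

// shouldAdvertise condenses the guard used by externalIPDesiredRoutes,
// clusterIPDesiredRoutes and lbSvcDesiredRoutes above. "routable" is
// membership in the routableServices set; "noEndpointsRoutable" is
// RoutesConfig.EnableNoServiceEndpointsRoutable.
func shouldAdvertise(routable, trafficPolicyLocal, noEndpointsRoutable bool) bool {
	if routable {
		return true
	}
	// Without usable endpoints, advertise only when the operator opted in and
	// the relevant traffic policy is Cluster; Local is never advertised blindly.
	return noEndpointsRoutable && !trafficPolicyLocal
}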
-func (r *ServiceReconciler) reconcileService(ctx context.Context, sc *instance.ServerWithConfig, newc *v2alpha1api.CiliumBGPVirtualRouter, svc *slim_corev1.Service, ls localServices, pathRefs pathReferencesMap) error { +func (r *ServiceReconciler) reconcileService(ctx context.Context, sc *instance.ServerWithConfig, newc *v2alpha1api.CiliumBGPVirtualRouter, svc *slim_corev1.Service, rs routableServices, pathRefs pathReferencesMap) error { - desiredRoutes, err := r.svcDesiredRoutes(newc, svc, ls) + desiredRoutes, err := r.svcDesiredRoutes(newc, svc, rs) if err != nil { return fmt.Errorf("failed to retrieve svc desired routes: %w", err) } diff --git a/pkg/bgpv1/manager/reconciler/service_test.go b/pkg/bgpv1/manager/reconciler/service_test.go index d4219111e2b8a..a050ebd9b1c11 100644 --- a/pkg/bgpv1/manager/reconciler/service_test.go +++ b/pkg/bgpv1/manager/reconciler/service_test.go @@ -25,6 +25,7 @@ import ( slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" "github.com/cilium/cilium/pkg/loadbalancer" + "github.com/cilium/cilium/pkg/svcrouteconfig" ) func TestServiceReconcilerWithLoadBalancer(t *testing.T) { @@ -640,7 +641,7 @@ func TestServiceReconcilerWithLoadBalancer(t *testing.T) { diffstore := store.NewFakeDiffStore[*slim_corev1.Service]() epDiffStore := store.NewFakeDiffStore[*k8s.Endpoints]() - reconciler := NewServiceReconciler(diffstore, epDiffStore).Reconciler.(*ServiceReconciler) + reconciler := NewServiceReconciler(diffstore, epDiffStore, svcrouteconfig.DefaultConfig).Reconciler.(*ServiceReconciler) reconciler.Init(testSC) defer reconciler.Cleanup(testSC) @@ -1292,7 +1293,7 @@ func TestServiceReconcilerWithClusterIP(t *testing.T) { diffstore := store.NewFakeDiffStore[*slim_corev1.Service]() epDiffStore := store.NewFakeDiffStore[*k8s.Endpoints]() - reconciler := NewServiceReconciler(diffstore, epDiffStore).Reconciler.(*ServiceReconciler) + reconciler := NewServiceReconciler(diffstore, epDiffStore, svcrouteconfig.DefaultConfig).Reconciler.(*ServiceReconciler) reconciler.Init(testSC) defer reconciler.Cleanup(testSC) @@ -1943,7 +1944,7 @@ func TestServiceReconcilerWithExternalIP(t *testing.T) { diffstore := store.NewFakeDiffStore[*slim_corev1.Service]() epDiffStore := store.NewFakeDiffStore[*k8s.Endpoints]() - reconciler := NewServiceReconciler(diffstore, epDiffStore).Reconciler.(*ServiceReconciler) + reconciler := NewServiceReconciler(diffstore, epDiffStore, svcrouteconfig.DefaultConfig).Reconciler.(*ServiceReconciler) reconciler.Init(testSC) defer reconciler.Cleanup(testSC) @@ -2173,7 +2174,7 @@ func TestEPUpdateOnly(t *testing.T) { diffstore := store.NewFakeDiffStore[*slim_corev1.Service]() epDiffStore := store.NewFakeDiffStore[*k8s.Endpoints]() - reconciler := NewServiceReconciler(diffstore, epDiffStore).Reconciler.(*ServiceReconciler) + reconciler := NewServiceReconciler(diffstore, epDiffStore, svcrouteconfig.DefaultConfig).Reconciler.(*ServiceReconciler) reconciler.Init(testSC) defer reconciler.Cleanup(testSC) @@ -2314,7 +2315,7 @@ func TestServiceReconcilerWithExternalIPAndClusterIP(t *testing.T) { diffstore := store.NewFakeDiffStore[*slim_corev1.Service]() epDiffStore := store.NewFakeDiffStore[*k8s.Endpoints]() - reconciler := NewServiceReconciler(diffstore, epDiffStore).Reconciler.(*ServiceReconciler) + reconciler := NewServiceReconciler(diffstore, epDiffStore, svcrouteconfig.DefaultConfig).Reconciler.(*ServiceReconciler) reconciler.Init(testSC) defer reconciler.Cleanup(testSC) diff --git 
a/pkg/bgpv1/manager/reconcilerv2/service.go b/pkg/bgpv1/manager/reconcilerv2/service.go index 85e9a7c524373..e6c5c5a7de56d 100644 --- a/pkg/bgpv1/manager/reconcilerv2/service.go +++ b/pkg/bgpv1/manager/reconcilerv2/service.go @@ -28,6 +28,7 @@ import ( "github.com/cilium/cilium/pkg/loadbalancer" ciliumoption "github.com/cilium/cilium/pkg/option" "github.com/cilium/cilium/pkg/rate" + "github.com/cilium/cilium/pkg/svcrouteconfig" "github.com/cilium/cilium/pkg/time" ) @@ -47,8 +48,9 @@ type ServiceReconcilerIn struct { DaemonConfig *ciliumoption.DaemonConfig Signaler *signaler.BGPCPSignaler - DB *statedb.DB - Frontends statedb.Table[*loadbalancer.Frontend] + DB *statedb.DB + Frontends statedb.Table[*loadbalancer.Frontend] + RoutesConfig svcrouteconfig.RoutesConfig } type ServiceReconciler struct { @@ -59,6 +61,7 @@ type ServiceReconciler struct { db *statedb.DB frontends statedb.Table[*loadbalancer.Frontend] metadata map[string]ServiceReconcilerMetadata + routesConfig svcrouteconfig.RoutesConfig } // ServiceReconcilerMetadata holds per-instance reconciler state. @@ -82,6 +85,7 @@ func NewServiceReconciler(in ServiceReconcilerIn) ServiceReconcilerOut { db: in.DB, frontends: in.Frontends, metadata: make(map[string]ServiceReconcilerMetadata), + routesConfig: in.RoutesConfig, } in.JobGroup.Add( job.OneShot("frontend-events", r.processFrontendEvents), @@ -367,13 +371,19 @@ func (r *ServiceReconciler) updateServiceAdvertisementsMetadata(p ReconcileParam r.setMetadata(p.BGPInstance, serviceMetadata) } -func hasLocalBackends(p ReconcileParams, fe *loadbalancer.Frontend) bool { +// hasBackends loops through Frontend backends and returns: +// 1) true, false - backends > 0, no local backend +// 2) true, true - backends > 0, at least 1 local backend +// 3) false, false - no backends, no local backend +func hasBackends(p ReconcileParams, fe *loadbalancer.Frontend) (hasBackends, hasLocalBackends bool) { for backend := range fe.Backends { + hasBackends = true if backend.NodeName == p.CiliumNode.Name && backend.State == loadbalancer.BackendStateActive { - return true + hasLocalBackends = true + return } } - return false + return } func (r *ServiceReconciler) fullReconciliationServiceList(p ReconcileParams) (toReconcile []*loadbalancer.Service, toWithdraw []loadbalancer.ServiceName, rx statedb.ReadTxn, err error) { @@ -436,7 +446,6 @@ func (r *ServiceReconciler) diffReconciliationServiceList(p ReconcileParams) (to } func (r *ServiceReconciler) getDesiredPaths(p ReconcileParams, desiredPeerAdverts PeerAdvertisements, toReconcile []*loadbalancer.Service, toWithdraw []loadbalancer.ServiceName, rx statedb.ReadTxn) (ResourceAFPathsMap, error) { - desiredServiceAFPaths := make(ResourceAFPathsMap) for _, svc := range toReconcile { key := resource.Key{Name: svc.Name.Name(), Namespace: svc.Name.Namespace()} @@ -543,8 +552,10 @@ func (r *ServiceReconciler) getExternalIPPaths(p ReconcileParams, frontends []*l if fe.Type != loadbalancer.SVCTypeExternalIPs { continue } - // Ignore externalTrafficPolicy == Local && no local EPs. - if fe.Service.ExtTrafficPolicy == loadbalancer.SVCTrafficPolicyLocal && !hasLocalBackends(p, fe) { + + hasBackends, hasLocalBackends := hasBackends(p, fe) + // Ignore externalTrafficPolicy == Local && no local EPs or ignore when there are no backends and EnableNoServiceEndpointsRoutable == false. 
+ if (fe.Service.ExtTrafficPolicy == loadbalancer.SVCTrafficPolicyLocal && !hasLocalBackends) || (!r.routesConfig.EnableNoServiceEndpointsRoutable && !hasBackends) { continue } @@ -566,8 +577,10 @@ func (r *ServiceReconciler) getClusterIPPaths(p ReconcileParams, frontends []*lo if fe.Type != loadbalancer.SVCTypeClusterIP { continue } - // Ignore internalTrafficPolicy == Local && no local EPs. - if fe.Service.IntTrafficPolicy == loadbalancer.SVCTrafficPolicyLocal && !hasLocalBackends(p, fe) { + + hasBackends, hasLocalBackends := hasBackends(p, fe) + // Ignore internalTrafficPolicy == Local && no local EPs or ignore when there are no backends and EnableNoServiceEndpointsRoutable == false. + if fe.Service.IntTrafficPolicy == loadbalancer.SVCTrafficPolicyLocal && !hasLocalBackends || (!r.routesConfig.EnableNoServiceEndpointsRoutable && !hasBackends) { continue } @@ -594,8 +607,10 @@ func (r *ServiceReconciler) getLoadBalancerIPPaths(p ReconcileParams, svc *loadb if fe.Type != loadbalancer.SVCTypeLoadBalancer { continue } - // Ignore externalTrafficPolicy == Local && no local EPs. - if fe.Service.ExtTrafficPolicy == loadbalancer.SVCTrafficPolicyLocal && !hasLocalBackends(p, fe) { + + hasBackends, hasLocalBackends := hasBackends(p, fe) + // Ignore externalTrafficPolicy == Local && no local EPs or ignore when there are no backends and EnableNoServiceEndpointsRoutable == false. + if (fe.Service.ExtTrafficPolicy == loadbalancer.SVCTrafficPolicyLocal && !hasLocalBackends) || (!r.routesConfig.EnableNoServiceEndpointsRoutable && !hasBackends) { continue } diff --git a/pkg/bgpv1/manager/reconcilerv2/service_test.go b/pkg/bgpv1/manager/reconcilerv2/service_test.go index 6b48189ae290a..deb524416555b 100644 --- a/pkg/bgpv1/manager/reconcilerv2/service_test.go +++ b/pkg/bgpv1/manager/reconcilerv2/service_test.go @@ -33,6 +33,7 @@ import ( "github.com/cilium/cilium/pkg/loadbalancer" ciliumoption "github.com/cilium/cilium/pkg/option" "github.com/cilium/cilium/pkg/source" + "github.com/cilium/cilium/pkg/svcrouteconfig" ) // svcTestStep represents one step in the service reconciler test execution. 
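In the v2 reconciler the same policy is expressed per frontend: hasBackends above reports both whether any backend exists and whether one of them is active on this node, and getExternalIPPaths, getClusterIPPaths and getLoadBalancerIPPaths skip a frontend when the traffic policy is Local without a local backend, or when there are no backends at all and EnableNoServiceEndpointsRoutable is off. A minimal sketch of that guard, taking the two booleans as hasBackends returns them:

// skipFrontend mirrors the guard added in the three path helpers above.
func skipFrontend(trafficPolicyLocal, hasBackends, hasLocalBackends, noEndpointsRoutable bool) bool {
	if trafficPolicyLocal && !hasLocalBackends {
		return true // Local policy needs an active backend on this node.
	}
	if !hasBackends && !noEndpointsRoutable {
		return true // No backends anywhere and the opt-in flag is off.
	}
	return false
}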
@@ -2594,6 +2595,7 @@ func newServiceTestFixture(t *testing.T, config option.BGPConfig) *svcTestFixtur return loadbalancer.Config{} }, ), + svcrouteconfig.Cell, cell.Invoke(func(db *statedb.DB, table statedb.RWTable[*loadbalancer.Frontend]) { f.db = db f.frontends = table diff --git a/pkg/bgpv1/test/fixtures.go b/pkg/bgpv1/test/fixtures.go index b895fcd663d2f..9b28597c8043d 100644 --- a/pkg/bgpv1/test/fixtures.go +++ b/pkg/bgpv1/test/fixtures.go @@ -40,6 +40,7 @@ import ( "github.com/cilium/cilium/pkg/loadbalancer" "github.com/cilium/cilium/pkg/metrics" "github.com/cilium/cilium/pkg/option" + "github.com/cilium/cilium/pkg/svcrouteconfig" ) // cilium BGP config @@ -189,6 +190,9 @@ func newFixture(t testing.TB, ctx context.Context, conf fixtureConfig) (*fixture // CiliumLoadBalancerIPPool cell.Provide(k8sPkg.LBIPPoolsResource), + // Routes config + cell.Config(svcrouteconfig.DefaultConfig), + // cilium node cell.Provide(func(lc cell.Lifecycle, c k8sClient.Clientset, mp workqueue.MetricsProvider) daemon_k8s.LocalCiliumNodeResource { store := resource.New[*cilium_api_v2.CiliumNode]( diff --git a/pkg/bgpv1/test/script_test.go b/pkg/bgpv1/test/script_test.go index e51af5a03356f..b9a0dec80e9ed 100644 --- a/pkg/bgpv1/test/script_test.go +++ b/pkg/bgpv1/test/script_test.go @@ -30,12 +30,14 @@ import ( "github.com/cilium/cilium/pkg/datapath/linux/safenetlink" "github.com/cilium/cilium/pkg/datapath/tables" envoyCfg "github.com/cilium/cilium/pkg/envoy/config" + "github.com/cilium/cilium/pkg/hive" "github.com/cilium/cilium/pkg/kpr" "github.com/cilium/cilium/pkg/loadbalancer" lbcell "github.com/cilium/cilium/pkg/loadbalancer/cell" "github.com/cilium/cilium/pkg/maglev" "github.com/cilium/cilium/pkg/node" "github.com/cilium/cilium/pkg/source" + "github.com/cilium/cilium/pkg/svcrouteconfig" ciliumhive "github.com/cilium/cilium/pkg/hive" ipamOption "github.com/cilium/cilium/pkg/ipam/option" @@ -55,9 +57,10 @@ const ( testLinkName = "cilium-bgp-test" // test arguments - testPeeringIPsFlag = "test-peering-ips" - ipamFlag = "ipam" - probeTCPMD5Flag = "probe-tcp-md5" + testPeeringIPsFlag = "test-peering-ips" + bgpNoEndpointsRoutableFlag = "bgp-no-endpoints-routable" + ipamFlag = "ipam" + probeTCPMD5Flag = "probe-tcp-md5" ) func TestPrivilegedScript(t *testing.T) { @@ -86,6 +89,7 @@ func TestPrivilegedScript(t *testing.T) { peeringIPs := flags.StringSlice(testPeeringIPsFlag, nil, "List of IPs used for peering in the test") ipam := flags.String(ipamFlag, ipamOption.IPAMKubernetes, "IPAM used by the test") probeTCPMD5 := flags.Bool(probeTCPMD5Flag, false, "Probe if TCP_MD5SIG socket option is available") + noEndpointsRoutable := flags.Bool(bgpNoEndpointsRoutableFlag, true, "") require.NoError(t, flags.Parse(args), "Error parsing test flags") if *probeTCPMD5 { @@ -101,6 +105,7 @@ func TestPrivilegedScript(t *testing.T) { // BGP cell bgpv1.Cell, + svcrouteconfig.Cell, // Provide statedb tables cell.Provide( @@ -152,6 +157,12 @@ func TestPrivilegedScript(t *testing.T) { }), ) + hive.AddConfigOverride( + h, + func(cfg *svcrouteconfig.RoutesConfig) { + cfg.EnableNoServiceEndpointsRoutable = *noEndpointsRoutable + }) + hiveLog := hivetest.Logger(t, hivetest.LogLevel(slog.LevelInfo)) t.Cleanup(func() { assert.NoError(t, h.Stop(hiveLog, context.TODO())) diff --git a/pkg/bgpv1/test/testdata/svc-no-endpoints.txtar b/pkg/bgpv1/test/testdata/svc-no-endpoints.txtar new file mode 100644 index 0000000000000..b75d87ee61735 --- /dev/null +++ b/pkg/bgpv1/test/testdata/svc-no-endpoints.txtar @@ -0,0 +1,168 @@ +#! 
--test-peering-ips=10.99.4.111,10.99.4.112 --bgp-no-endpoints-routable=false + +# Tests sharing of the same LB VIP across multiple services. +# VIP should be advertised if one of the shared services disappears but the other one remains. + +# Start the hive +hive start + +# Configure gobgp server +gobgp/add-server test 65010 10.99.4.111 1790 + +# Configure peers on GoBGP +gobgp/add-peer 10.99.4.112 65001 + +# Add k8s services +k8s/add service-1.yaml + +# Configure BGP on Cilium +k8s/add cilium-node.yaml bgp-node-config.yaml bgp-peer-config.yaml bgp-advertisement.yaml + +# Wait for peering to be established +gobgp/wait-state 10.99.4.112 ESTABLISHED + +# Validate that LB IP is NOT advertised +gobgp/routes -o routes.actual +* cmp gobgp-routes-empty.expected routes.actual + +# Add corresponding endpointSlices +k8s/add endpoints1.yaml + +# Validate that LB IP is advertised +gobgp/routes -o routes.actual +* cmp gobgp-routes.expected routes.actual + +# Delete one service +k8s/delete service-1.yaml +sleep 0.1s # give some time for the change to propagate to avoid false positive + +# Validate that LB IP is still advertised +gobgp/routes -o routes.actual +* cmp gobgp-routes-empty.expected routes.actual + +##### + +-- cilium-node.yaml -- +apiVersion: cilium.io/v2 +kind: CiliumNode +metadata: + name: test-node +spec: + addresses: + - ip: 10.99.4.112 + type: InternalIP + ipam: + podCIDRs: + - 10.244.1.0/24 + +-- bgp-node-config.yaml -- +apiVersion: cilium.io/v2 +kind: CiliumBGPNodeConfig +metadata: + name: test-node +spec: + bgpInstances: + - localASN: 65001 + name: tor-65001 + peers: + - name: gobgp-peer-1 + peerASN: 65010 + peerAddress: 10.99.4.111 + localAddress: 10.99.4.112 + peerConfigRef: + name: gobgp-peer-config + +-- bgp-peer-config.yaml -- +apiVersion: cilium.io/v2 +kind: CiliumBGPPeerConfig +metadata: + name: gobgp-peer-config +spec: + transport: + peerPort: 1790 + timers: + connectRetryTimeSeconds: 1 + families: + - afi: ipv4 + safi: unicast + advertisements: + matchLabels: + advertise: services + +-- bgp-advertisement.yaml -- +apiVersion: cilium.io/v2 +kind: CiliumBGPAdvertisement +metadata: + name: lb-only + labels: + advertise: services +spec: + advertisements: + - advertisementType: Service + service: + addresses: + - LoadBalancerIP + selector: + matchExpressions: + - { key: bgp, operator: In, values: [ advertise ] } + +-- service-1.yaml -- +apiVersion: v1 +kind: Service +metadata: + name: echo1 + namespace: test + labels: + bgp: advertise + annotations: + "lbipam.cilium.io/sharing-key": "1234" +spec: + type: LoadBalancer + clusterIP: 10.96.50.104 + clusterIPs: + - 10.96.50.104 + externalTrafficPolicy: Cluster + internalTrafficPolicy: Cluster + ipFamilies: + - IPv4 + ipFamilyPolicy: SingleStack + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + selector: + name: echo + sessionAffinity: None +status: + loadBalancer: + ingress: + - ip: 172.16.1.1 + +-- endpoints1.yaml -- +apiVersion: discovery.k8s.io/v1 +kind: EndpointSlice +metadata: + labels: + kubernetes.io/service-name: echo1 + name: echo-eps1 + namespace: test +addressType: IPv4 +endpoints: +- addresses: + - 10.244.1.20 + conditions: + ready: true + serving: true + terminating: false + nodeName: test-node +ports: +- name: http + port: 80 + protocol: TCP + +-- gobgp-routes.expected -- +Prefix NextHop Attrs +172.16.1.1/32 10.99.4.112 [{Origin: i} {AsPath: 65001} {Nexthop: 10.99.4.112}] +-- gobgp-routes-empty.expected -- +Prefix NextHop Attrs diff --git a/pkg/bpf/analyze/blocks.go b/pkg/bpf/analyze/blocks.go index 
e6b80236e4075..40a78e6fd3f56 100644 --- a/pkg/bpf/analyze/blocks.go +++ b/pkg/bpf/analyze/blocks.go @@ -170,7 +170,6 @@ type Block struct { predecessors []*Block branch *Block fthrough *Block - predict uint8 } func (b *Block) leader(insns asm.Instructions) *leader { @@ -312,17 +311,6 @@ func (b *Block) Dump(insns asm.Instructions) string { sb.WriteString("\n") } - if b.predict != 0 { - sb.WriteString("Predict: ") - switch b.predict { - case 1: - sb.WriteString("branch taken\n") - case 2: - sb.WriteString("fallthrough taken\n") - default: - } - } - return sb.String() } @@ -350,6 +338,10 @@ type Blocks struct { // l is a bitmap tracking reachable blocks. l bitmap + + // j is a bitmap tracking predicted jumps. If the nth bit is 1, the jump + // at the end of block n is predicted to always be taken. + j bitmap } // LiveInstructions returns a sequence of [asm.Instruction]s held by Blocks. The @@ -439,7 +431,20 @@ func (bl *Blocks) Dump(insns asm.Instructions) string { for _, block := range bl.b { sb.WriteString(fmt.Sprintf("\n=== Block %d ===\n", block.id)) sb.WriteString(block.Dump(insns)) - sb.WriteString(fmt.Sprintf("Live: %t\n", bl.l.get(uint64(block.id)))) + + // No reachability information yet. + if len(bl.l) == 0 { + continue + } + + sb.WriteString(fmt.Sprintf("Live: %t, ", bl.l.get(uint64(block.id)))) + sb.WriteString("branch: ") + if bl.j.get(uint64(block.id)) { + sb.WriteString("jump") + } else { + sb.WriteString("fallthrough") + } + sb.WriteString("\n") } return sb.String() } diff --git a/pkg/bpf/analyze/reachability.go b/pkg/bpf/analyze/reachability.go index ec189dfc61a0f..64d0805a50969 100644 --- a/pkg/bpf/analyze/reachability.go +++ b/pkg/bpf/analyze/reachability.go @@ -142,13 +142,15 @@ func Reachability(blocks *Blocks, insns asm.Instructions, variables map[string]V } live := newBitmap(uint64(blocks.count())) + jumps := newBitmap(uint64(blocks.count())) // Start recursing at first block since it is always live. - if err := visitBlock(blocks.first(), insns, vars, live); err != nil { + if err := visitBlock(blocks.first(), insns, vars, live, jumps); err != nil { return nil, fmt.Errorf("predicting blocks: %w", err) } blocks.l = live + blocks.j = jumps return blocks, nil } @@ -232,11 +234,11 @@ type mapOffset struct { // unpredictableBlock is called when the branch cannot be predicted. It visits // both the branch and fallthrough blocks. -func unpredictableBlock(b *Block, insns asm.Instructions, vars map[mapOffset]VariableSpec, live bitmap) error { - if err := visitBlock(b.branch, insns, vars, live); err != nil { +func unpredictableBlock(b *Block, insns asm.Instructions, vars map[mapOffset]VariableSpec, live, jumps bitmap) error { + if err := visitBlock(b.branch, insns, vars, live, jumps); err != nil { return fmt.Errorf("visiting branch block %d: %w", b.branch.id, err) } - if err := visitBlock(b.fthrough, insns, vars, live); err != nil { + if err := visitBlock(b.fthrough, insns, vars, live, jumps); err != nil { return fmt.Errorf("visiting fallthrough block %d: %w", b.fthrough.id, err) } return nil @@ -244,7 +246,7 @@ func unpredictableBlock(b *Block, insns asm.Instructions, vars map[mapOffset]Var // visitBlock recursively visits a block and its successors to determine // reachability based on the branch instructions and the provided vars. 
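The blocks.go and reachability.go changes above move branch prediction out of the shared Block struct and into a per-analysis bitmap on Blocks: bit n of j means the jump ending block n is predicted always taken, which is what lets Reachability run concurrently over copies of the same spec (see the regression test added below). A minimal sketch of the word-packed bitmap operations assumed by jumps.set(...) and bl.j.get(...); the real type lives in the analyze package and may differ in signatures:

// bitmap is a sketch of a word-packed bit set keyed by basic-block id.
type bitmap []uint64

func newBitmap(n uint64) bitmap { return make(bitmap, (n+63)/64) }

func (b bitmap) set(i uint64, v bool) {
	if v {
		b[i/64] |= 1 << (i % 64)
		return
	}
	b[i/64] &^= 1 << (i % 64)
}

func (b bitmap) get(i uint64) bool {
	return b[i/64]&(1<<(i%64)) != 0
}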
-func visitBlock(b *Block, insns asm.Instructions, vars map[mapOffset]VariableSpec, live bitmap) error { +func visitBlock(b *Block, insns asm.Instructions, vars map[mapOffset]VariableSpec, live, jumps bitmap) error { if b == nil { return nil } @@ -260,17 +262,17 @@ func visitBlock(b *Block, insns asm.Instructions, vars map[mapOffset]VariableSpe branch := findBranch(pull) if branch == nil { - return unpredictableBlock(b, insns, vars, live) + return unpredictableBlock(b, insns, vars, live, jumps) } deref := findDereference(pull, branch.Dst) if deref == nil { - return unpredictableBlock(b, insns, vars, live) + return unpredictableBlock(b, insns, vars, live, jumps) } load := findMapLoad(pull, deref.Src) if load == nil { - return unpredictableBlock(b, insns, vars, live) + return unpredictableBlock(b, insns, vars, live, jumps) } // TODO(tb): evalBranch doesn't currently take the deref's offset field into @@ -278,7 +280,7 @@ func visitBlock(b *Block, insns asm.Instructions, vars map[mapOffset]VariableSpe // to be more robust and remove this limitation. vs := lookupVariable(load, vars) if vs == nil || !vs.Constant() || vs.Size() > 8 { - return unpredictableBlock(b, insns, vars, live) + return unpredictableBlock(b, insns, vars, live, jumps) } jump, err := evalBranch(branch, vs) @@ -288,13 +290,12 @@ func visitBlock(b *Block, insns asm.Instructions, vars map[mapOffset]VariableSpe // If the branch is always taken, only visit the branch target. if jump { - b.predict = 1 - return visitBlock(b.branch, insns, vars, live) + jumps.set(b.id, true) + return visitBlock(b.branch, insns, vars, live, jumps) } // Otherwise, only visit the fallthrough target. - b.predict = 2 - return visitBlock(b.fthrough, insns, vars, live) + return visitBlock(b.fthrough, insns, vars, live, jumps) } // lookupVariable retrieves the VariableSpec for the given load instruction from diff --git a/pkg/bpf/analyze/reachability_test.go b/pkg/bpf/analyze/reachability_test.go index 1a5ecf5032a1e..39cadc225f829 100644 --- a/pkg/bpf/analyze/reachability_test.go +++ b/pkg/bpf/analyze/reachability_test.go @@ -13,6 +13,7 @@ import ( "github.com/cilium/ebpf/asm" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" ) // Simple example with a `__config_use_map_b` Variable acting as a feature flag. @@ -226,6 +227,36 @@ func TestReachabilityLongJump(t *testing.T) { assert.True(t, enabled.isLive(3)) } +// Test that Reachability can be called concurrently. This is a regression test +// for data races in Blocks and Block. Block should never be modified by +// reachability analysis as it is shared across all users of (copies of) a +// CollectionSpec. +func TestReachabilityConcurrent(t *testing.T) { + spec, err := ebpf.LoadCollectionSpec("../testdata/unused-map-pruning.o") + require.NoError(t, err) + + obj := struct { + Program *ebpf.ProgramSpec `ebpf:"sample_program"` + UseMapB *ebpf.VariableSpec `ebpf:"__config_use_map_b"` + }{} + require.NoError(t, spec.Assign(&obj)) + + // Predict first branch as taken. 
+ obj.UseMapB.Set(true) + + blocks, err := computeBlocks(obj.Program.Instructions) + require.NoError(t, err) + + var eg errgroup.Group + for range 2 { + eg.Go(func() error { + _, err := Reachability(blocks, obj.Program.Instructions, VariableSpecs(spec.Variables)) + return err + }) + } + require.NoError(t, eg.Wait()) +} + func BenchmarkReachability(b *testing.B) { b.ReportAllocs() diff --git a/pkg/bpf/collection.go b/pkg/bpf/collection.go index 150e719eae8c4..f251711807b5c 100644 --- a/pkg/bpf/collection.go +++ b/pkg/bpf/collection.go @@ -12,7 +12,6 @@ import ( "strings" "github.com/cilium/ebpf" - "github.com/cilium/ebpf/asm" "github.com/cilium/ebpf/btf" "github.com/cilium/cilium/pkg/bpf/analyze" @@ -25,35 +24,6 @@ const ( callsMap = "cilium_calls" ) -// LoadCollectionSpec loads the eBPF ELF at the given path and parses it into a -// CollectionSpec. This spec is only a blueprint of the contents of the ELF and -// does not represent any live resources that have been loaded into the kernel. -// -// This is a wrapper around ebpf.LoadCollectionSpec that populates the object's -// calls map with programs marked with the __declare_tail() annotation. It -// performs static reachability analysis of tail call programs. Any unreachable -// tail call program is removed from the spec. -func LoadCollectionSpec(logger *slog.Logger, path string) (*ebpf.CollectionSpec, error) { - spec, err := ebpf.LoadCollectionSpec(path) - if err != nil { - return nil, err - } - - if err := checkUnspecifiedPrograms(spec); err != nil { - return nil, fmt.Errorf("checking for unspecified programs: %w", err) - } - - if err := removeUnreachableTailcalls(logger, spec); err != nil { - return nil, fmt.Errorf("removing unreachable tail calls: %w", err) - } - - if err := resolveTailCalls(spec); err != nil { - return nil, fmt.Errorf("resolving tail calls: %w", err) - } - - return spec, nil -} - // checkUnspecifiedPrograms returns an error if any of the programs in the spec // are of the UnspecifiedProgram type. func checkUnspecifiedPrograms(spec *ebpf.CollectionSpec) error { @@ -135,141 +105,6 @@ func resolveTailCalls(spec *ebpf.CollectionSpec) error { return nil } -// removeUnreachableTailcalls removes tail calls that are not reachable from -// entrypoint programs. This is done by traversing the call graph of the -// entrypoint programs and marking all reachable tail calls. Any tail call that -// is not marked is removed from the CollectionSpec. -func removeUnreachableTailcalls(logger *slog.Logger, spec *ebpf.CollectionSpec) error { - type tail struct { - referenced bool - visited bool - spec *ebpf.ProgramSpec - } - - // Build a map of entrypoint programs annotated with __section_entry. - entrypoints := make(map[string]*ebpf.ProgramSpec) - for _, prog := range spec.Programs { - if isEntrypoint(prog) { - entrypoints[prog.Name] = prog - } - } - - // Build a map of tail call slots to ProgramSpecs. - tailcalls := make(map[uint32]*tail) - for _, prog := range spec.Programs { - if !isTailCall(prog) { - continue - } - - slot, err := tailCallSlot(prog) - if err != nil { - return fmt.Errorf("getting tail call slot: %w", err) - } - - tailcalls[slot] = &tail{ - spec: prog, - } - } - - // Discover all tailcalls that are reachable from the given program. - visit := func(prog *ebpf.ProgramSpec, tailcalls map[uint32]*tail) error { - // We look back from any tailcall, so we expect there to always be 3 instructions ahead of any tail call instr. 
- for i := 3; i < len(prog.Instructions); i++ { - // The `tail_call_static` C function is always used to call tail calls when - // the map index is known at compile time. - // Due to inline ASM this generates the following instructions: - // Mov R1, Rx - // Mov R2, - // Mov R3, - // call tail_call - - // Find the tail call instruction. - inst := prog.Instructions[i] - if !inst.IsBuiltinCall() || inst.Constant != int64(asm.FnTailCall) { - continue - } - - // Check that the previous instruction is a mov of the tail call index. - movIdx := prog.Instructions[i-1] - if movIdx.OpCode.ALUOp() != asm.Mov || movIdx.Dst != asm.R3 { - continue - } - - // Check that the instruction before that is the load of the tail call map. - movR2 := prog.Instructions[i-2] - if movR2.OpCode != asm.LoadImmOp(asm.DWord) || movR2.Src != asm.PseudoMapFD { - continue - } - - ref := movR2.Reference() - - // Ignore static tail calls made to maps that are not the calls map - if ref != callsMap { - logger.Debug( - "skipping tail call into map other than the calls map", - logfields.Section, prog.SectionName, - logfields.Prog, prog.Name, - logfields.Instruction, i, - logfields.Reference, ref, - ) - continue - } - - tc := tailcalls[uint32(movIdx.Constant)] - if tc == nil { - return fmt.Errorf( - "potential missed tail call in program %s to slot %d at insn %d", - prog.Name, - movIdx.Constant, - i, - ) - } - - tc.referenced = true - } - - return nil - } - - // Discover all tailcalls that are reachable from the entrypoints. - for _, prog := range entrypoints { - if err := visit(prog, tailcalls); err != nil { - return err - } - } - - // Keep visiting tailcalls until no more are discovered. -reset: - for _, tailcall := range tailcalls { - // If a tailcall is referenced by an entrypoint or another tailcall we should visit it - if tailcall.referenced && !tailcall.visited { - if err := visit(tailcall.spec, tailcalls); err != nil { - return err - } - tailcall.visited = true - - // Visiting this tail call might have caused tail calls earlier in the list to become referenced, but this - // loop already skipped them. So reset the loop. If we already visited a tailcall we will ignore them anyway. - goto reset - } - } - - // Remove all tailcalls that are not referenced. - for _, tailcall := range tailcalls { - if !tailcall.referenced { - logger.Debug( - "unreferenced tail call, deleting", - logfields.Section, tailcall.spec.SectionName, - logfields.Prog, tailcall.spec.Name, - ) - - delete(spec.Programs, tailcall.spec.Name) - } - } - - return nil -} - // LoadAndAssign loads spec into the kernel and assigns the requested eBPF // objects to the given object. It is a wrapper around [LoadCollection]. See its // documentation for more details on the loading process. 
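Both the removed removeUnreachableTailcalls above and the new removeUnusedTailcalls added later in this diff rely on the same code shape: tail_call_static() expands to a fixed instruction window (load of the calls map into R2, mov of the slot index into R3, then a call to the tail_call helper), so scanning backwards from each builtin call recovers the target slot; the new version merely skips instructions that reachability analysis marked dead. A sketch of that window check using the cilium/ebpf asm package, with the helper name tailCallSlotAt being illustrative:

package bpf

import "github.com/cilium/ebpf/asm"

// tailCallSlotAt returns the slot index and map reference if insns[i] is the
// "call tail_call" of a tail_call_static() expansion, as matched above.
func tailCallSlotAt(insns asm.Instructions, i int) (slot int64, mapRef string, ok bool) {
	if i < 2 {
		return 0, "", false
	}
	call := insns[i]
	if !call.IsBuiltinCall() || call.Constant != int64(asm.FnTailCall) {
		return 0, "", false
	}
	movIdx := insns[i-1] // mov r3, <slot>
	if movIdx.OpCode.ALUOp() != asm.Mov || movIdx.Dst != asm.R3 {
		return 0, "", false
	}
	loadMap := insns[i-2] // ld_imm64 r2, <map>
	if loadMap.OpCode != asm.LoadImmOp(asm.DWord) || loadMap.Src != asm.PseudoMapFD {
		return 0, "", false
	}
	return movIdx.Constant, loadMap.Reference(), true
}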
@@ -350,6 +185,10 @@ func LoadCollection(logger *slog.Logger, spec *ebpf.CollectionSpec, opts *Collec opts = &CollectionOptions{} } + if err := checkUnspecifiedPrograms(spec); err != nil { + return nil, nil, fmt.Errorf("checking for unspecified programs: %w", err) + } + opts.populateMapReplacements() logger.Debug("Loading Collection into kernel", @@ -369,6 +208,14 @@ func LoadCollection(logger *slog.Logger, spec *ebpf.CollectionSpec, opts *Collec return nil, nil, fmt.Errorf("applying variable overrides: %w", err) } + if err := removeUnusedTailcalls(logger, spec); err != nil { + return nil, nil, fmt.Errorf("removing unused tail calls: %w", err) + } + + if err := resolveTailCalls(spec); err != nil { + return nil, nil, fmt.Errorf("resolving tail calls: %w", err) + } + keep, err := removeUnusedMaps(spec, opts.Keep) if err != nil { return nil, nil, fmt.Errorf("pruning unused maps: %w", err) diff --git a/pkg/bpf/collection_test.go b/pkg/bpf/collection_test.go index f3bc8e1e787ff..42f11245f875e 100644 --- a/pkg/bpf/collection_test.go +++ b/pkg/bpf/collection_test.go @@ -14,30 +14,6 @@ import ( "github.com/cilium/cilium/pkg/testutils" ) -func TestRemoveUnreachableTailcalls(t *testing.T) { - logger := hivetest.Logger(t) - // Use upstream LoadCollectionSpec to defer the call to - // removeUnreachableTailcalls. - spec, err := ebpf.LoadCollectionSpec("testdata/unreachable-tailcall.o") - require.NoError(t, err) - - assert.Contains(t, spec.Programs, "cil_entry") - assert.Contains(t, spec.Programs, "a") - assert.Contains(t, spec.Programs, "b") - assert.Contains(t, spec.Programs, "c") - assert.Contains(t, spec.Programs, "d") - assert.Contains(t, spec.Programs, "e") - - require.NoError(t, removeUnreachableTailcalls(logger, spec)) - - assert.Contains(t, spec.Programs, "cil_entry") - assert.Contains(t, spec.Programs, "a") - assert.Contains(t, spec.Programs, "b") - assert.Contains(t, spec.Programs, "c") - assert.NotContains(t, spec.Programs, "d") - assert.NotContains(t, spec.Programs, "e") -} - func TestPrivilegedUpgradeMap(t *testing.T) { testutils.PrivilegedTest(t) logger := hivetest.Logger(t) @@ -55,7 +31,7 @@ func TestPrivilegedUpgradeMap(t *testing.T) { }, ebpf.MapOptions{PinPath: temp}) require.NoError(t, err) - spec, err := LoadCollectionSpec(logger, "testdata/upgrade-map.o") + spec, err := ebpf.LoadCollectionSpec("testdata/upgrade-map.o") require.NoError(t, err) // Use LoadAndAssign to make sure commit works through map upgrades. 
This is a diff --git a/pkg/bpf/testdata/unreachable-tailcall.c b/pkg/bpf/testdata/unreachable-tailcall.c index 15513936b4385..73475797dc215 100644 --- a/pkg/bpf/testdata/unreachable-tailcall.c +++ b/pkg/bpf/testdata/unreachable-tailcall.c @@ -1,10 +1,12 @@ #include #include "common.h" +#include + #include #include -volatile const int global_var = 0; +DECLARE_CONFIG(bool, use_tail_b, "Use tailcall B or C") #define TAIL_A 0 #define TAIL_B 1 @@ -36,7 +38,7 @@ static int b(void *ctx) { __declare_tail(TAIL_A) static int a(void *ctx) { - if (global_var == 0x01) { + if (CONFIG(use_tail_b)) { tail_call_static(ctx, cilium_calls, TAIL_B); } else { tail_call_static(ctx, cilium_calls, TAIL_C); diff --git a/pkg/bpf/testdata/unreachable-tailcall.o b/pkg/bpf/testdata/unreachable-tailcall.o index 860901c39cf93..3486ab0600fa7 100644 Binary files a/pkg/bpf/testdata/unreachable-tailcall.o and b/pkg/bpf/testdata/unreachable-tailcall.o differ diff --git a/pkg/bpf/unused_tailcalls.go b/pkg/bpf/unused_tailcalls.go new file mode 100644 index 0000000000000..09a855247f199 --- /dev/null +++ b/pkg/bpf/unused_tailcalls.go @@ -0,0 +1,167 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package bpf + +import ( + "fmt" + "log/slog" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + + "github.com/cilium/cilium/pkg/bpf/analyze" + "github.com/cilium/cilium/pkg/logging/logfields" +) + +// removeUnusedTailcalls removes tail calls that are not reachable from +// entrypoint programs. +func removeUnusedTailcalls(logger *slog.Logger, spec *ebpf.CollectionSpec) error { + type tail struct { + referenced bool + visited bool + spec *ebpf.ProgramSpec + } + + // Build a map of tail call slots to ProgramSpecs. + tails := make(map[uint32]*tail) + for _, prog := range spec.Programs { + if !isTailCall(prog) { + continue + } + + slot, err := tailCallSlot(prog) + if err != nil { + return fmt.Errorf("getting tail call slot: %w", err) + } + + tails[slot] = &tail{ + spec: prog, + } + } + + // Discover all tailcalls that are reachable from the given program. + visit := func(prog *ebpf.ProgramSpec, tailcalls map[uint32]*tail) error { + // Load Blocks computed after compilation, or compute new ones. + bl, err := analyze.MakeBlocks(prog.Instructions) + if err != nil { + return fmt.Errorf("computing Blocks for Program %s: %w", prog.Name, err) + } + + // Analyze reachability given the VariableSpecs provided at load time. + bl, err = analyze.Reachability(bl, prog.Instructions, analyze.VariableSpecs(spec.Variables)) + if err != nil { + return fmt.Errorf("reachability analysis for program %s: %w", prog.Name, err) + } + + const windowSize = 3 + + i := -1 + for _, live := range bl.LiveInstructions(prog.Instructions) { + i++ + if !live { + continue + } + + if i <= windowSize { + // Not enough instructions to backtrack yet. + continue + } + + // The `tail_call_static` C function is always used to call tail calls when + // the map index is known at compile time. + // Due to inline ASM this generates the following instructions: + // Mov R1, Rx + // Mov R2, + // Mov R3, + // call tail_call + + // Find the tail call instruction. + inst := prog.Instructions[i] + if !inst.IsBuiltinCall() || inst.Constant != int64(asm.FnTailCall) { + continue + } + + // Check that the previous instruction is a mov of the tail call index. + movIdx := prog.Instructions[i-1] + if movIdx.OpCode.ALUOp() != asm.Mov || movIdx.Dst != asm.R3 { + continue + } + + // Check that the instruction before that is the load of the tail call map. 
+ movR2 := prog.Instructions[i-2] + if movR2.OpCode != asm.LoadImmOp(asm.DWord) || movR2.Src != asm.PseudoMapFD { + continue + } + + ref := movR2.Reference() + + // Ignore static tail calls made to maps that are not the calls map + if ref != callsMap { + logger.Debug( + "skipping tail call into map other than the calls map", + logfields.Section, prog.SectionName, + logfields.Prog, prog.Name, + logfields.Instruction, i, + logfields.Reference, ref, + ) + continue + } + + tc := tailcalls[uint32(movIdx.Constant)] + if tc == nil { + return fmt.Errorf( + "potential missed tail call in program %s to slot %d at insn %d", + prog.Name, + movIdx.Constant, + i, + ) + } + + tc.referenced = true + } + + return nil + } + + // Discover all tailcalls that are reachable from the entrypoints. + for _, prog := range spec.Programs { + if !isEntrypoint(prog) { + continue + } + if err := visit(prog, tails); err != nil { + return err + } + } + + // Keep visiting tailcalls until no more are discovered. +reset: + for _, tailcall := range tails { + // If a tailcall is referenced by an entrypoint or another tailcall we should visit it + if tailcall.referenced && !tailcall.visited { + if err := visit(tailcall.spec, tails); err != nil { + return err + } + tailcall.visited = true + + // Visiting this tail call might have caused tail calls earlier in the list to become referenced, but this + // loop already skipped them. So reset the loop. If we already visited a tailcall we will ignore them anyway. + goto reset + } + } + + // Remove all tailcalls that are not referenced. + for _, tailcall := range tails { + if !tailcall.referenced { + logger.Debug( + "unreferenced tail call, deleting", + logfields.Section, tailcall.spec.SectionName, + logfields.Prog, tailcall.spec.Name, + ) + + delete(spec.Programs, tailcall.spec.Name) + } + } + + return nil +} diff --git a/pkg/bpf/unused_tailcalls_test.go b/pkg/bpf/unused_tailcalls_test.go new file mode 100644 index 0000000000000..d41ed72c9ac33 --- /dev/null +++ b/pkg/bpf/unused_tailcalls_test.go @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package bpf + +import ( + "testing" + + "github.com/cilium/ebpf" + "github.com/cilium/hive/hivetest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestRemoveUnusedTailcalls(t *testing.T) { + logger := hivetest.Logger(t) + // Use upstream LoadCollectionSpec to defer the call to + // removeUnusedTailcalls. 
+ spec, err := ebpf.LoadCollectionSpec("testdata/unreachable-tailcall.o") + require.NoError(t, err) + + assert.Contains(t, spec.Programs, "cil_entry") + assert.Contains(t, spec.Programs, "a") + assert.Contains(t, spec.Programs, "b") + assert.Contains(t, spec.Programs, "c") + assert.Contains(t, spec.Programs, "d") + assert.Contains(t, spec.Programs, "e") + + cpy := spec.Copy() + obj := struct { + UseTailB *ebpf.VariableSpec `ebpf:"__config_use_tail_b"` + }{} + require.NoError(t, cpy.Assign(&obj)) + require.NoError(t, obj.UseTailB.Set(true)) + + require.NoError(t, removeUnusedTailcalls(logger, cpy)) + + assert.Contains(t, cpy.Programs, "cil_entry") + assert.Contains(t, cpy.Programs, "a") + assert.Contains(t, cpy.Programs, "b") + assert.Contains(t, cpy.Programs, "c") + assert.NotContains(t, cpy.Programs, "d") + assert.NotContains(t, cpy.Programs, "e") + + cpy = spec.Copy() + obj = struct { + UseTailB *ebpf.VariableSpec `ebpf:"__config_use_tail_b"` + }{} + require.NoError(t, cpy.Assign(&obj)) + require.NoError(t, obj.UseTailB.Set(false)) + + require.NoError(t, removeUnusedTailcalls(logger, cpy)) + + assert.Contains(t, cpy.Programs, "cil_entry") + assert.Contains(t, cpy.Programs, "a") + assert.NotContains(t, cpy.Programs, "b") + assert.Contains(t, cpy.Programs, "c") + assert.NotContains(t, cpy.Programs, "d") + assert.NotContains(t, cpy.Programs, "e") +} diff --git a/pkg/ciliumenvoyconfig/script_test.go b/pkg/ciliumenvoyconfig/script_test.go index 1d4364ae6e8ca..ae9c36fe7e28d 100644 --- a/pkg/ciliumenvoyconfig/script_test.go +++ b/pkg/ciliumenvoyconfig/script_test.go @@ -35,6 +35,7 @@ import ( "google.golang.org/protobuf/proto" daemonk8s "github.com/cilium/cilium/daemon/k8s" + cmtypes "github.com/cilium/cilium/pkg/clustermesh/types" "github.com/cilium/cilium/pkg/datapath/tables" "github.com/cilium/cilium/pkg/endpoint/regeneration" "github.com/cilium/cilium/pkg/envoy" @@ -118,6 +119,7 @@ func TestScript(t *testing.T) { }, ), node.LocalNodeStoreTestCell, + cell.Provide(func() cmtypes.ClusterInfo { return cmtypes.ClusterInfo{} }), cell.Invoke(func(lns_ *node.LocalNodeStore) { lns = lns_ }), ), tableCells, diff --git a/pkg/client/client.go b/pkg/client/client.go index fcaafc7241b04..8ca559b2aa9b3 100644 --- a/pkg/client/client.go +++ b/pkg/client/client.go @@ -436,8 +436,8 @@ func FormatStatusResponse(w io.Writer, sr *models.StatusResponse, sd StatusDetai } if sr.ClusterMesh != nil { - fmt.Fprintf(w, "ClusterMesh:\t%d/%d remote clusters ready, %d global-services\n", - NumReadyClusters(sr.ClusterMesh.Clusters), len(sr.ClusterMesh.Clusters), sr.ClusterMesh.NumGlobalServices) + fmt.Fprintf(w, "ClusterMesh:\t%d/%d remote clusters ready\n", + NumReadyClusters(sr.ClusterMesh.Clusters), len(sr.ClusterMesh.Clusters)) verbosity := RemoteClustersStatusNotReadyOnly if sd.AllClusters { diff --git a/pkg/clustermesh/clustermesh.go b/pkg/clustermesh/clustermesh.go index e35c9e9240b3f..b5fccdc194033 100644 --- a/pkg/clustermesh/clustermesh.go +++ b/pkg/clustermesh/clustermesh.go @@ -134,12 +134,9 @@ func NewClusterMesh(lifecycle cell.Lifecycle, c Configuration) *ClusterMesh { nodeName := nodeTypes.GetName() cm := &ClusterMesh{ - conf: c, - nodeName: nodeName, - globalServices: common.NewGlobalServiceCache( - c.Logger, - c.Metrics.TotalGlobalServices.WithLabelValues(c.ClusterInfo.Name, nodeName), - ), + conf: c, + nodeName: nodeName, + globalServices: common.NewGlobalServiceCache(c.Logger), FeatureMetrics: c.FeatureMetrics, } @@ -210,12 +207,14 @@ func (cm *ClusterMesh) NewRemoteCluster(name string, status 
common.StatusFunc) c cm.conf.ServiceMerger.MergeExternalServiceDelete, ), store.RWSWithOnSyncCallback(func(ctx context.Context) { close(rc.synced.services) }), + store.RWSWithEntriesMetric(cm.conf.Metrics.TotalServices.WithLabelValues(cm.conf.ClusterInfo.Name, cm.nodeName, rc.name)), ) rc.ipCacheWatcher = ipcache.NewIPIdentityWatcher( cm.conf.Logger, name, cm.conf.IPCache, cm.conf.StoreFactory, source.ClusterMesh, store.RWSWithOnSyncCallback(func(ctx context.Context) { close(rc.synced.ipcache) }), + store.RWSWithEntriesMetric(cm.conf.Metrics.TotalEndpoints.WithLabelValues(cm.conf.ClusterInfo.Name, cm.nodeName, rc.name)), ) rc.ipCacheWatcherExtraOpts = cm.conf.IPCacheWatcherExtraOpts @@ -281,9 +280,7 @@ func (cm *ClusterMesh) synced(ctx context.Context, toWaitFn func(*remoteCluster) // Status returns the status of the ClusterMesh subsystem func (cm *ClusterMesh) Status() (status *models.ClusterMeshStatus) { - status = &models.ClusterMeshStatus{ - NumGlobalServices: int64(cm.globalServices.Size()), - } + status = &models.ClusterMeshStatus{} cm.common.ForEachRemoteCluster(func(rci common.RemoteCluster) error { rc := rci.(*remoteCluster) diff --git a/pkg/clustermesh/common/services.go b/pkg/clustermesh/common/services.go index 6d31a3b7f24d8..57208b7a1750f 100644 --- a/pkg/clustermesh/common/services.go +++ b/pkg/clustermesh/common/services.go @@ -13,7 +13,6 @@ import ( "github.com/cilium/cilium/pkg/kvstore/store" "github.com/cilium/cilium/pkg/lock" "github.com/cilium/cilium/pkg/logging/logfields" - "github.com/cilium/cilium/pkg/metrics/metric" ) type GlobalService struct { @@ -30,16 +29,12 @@ type GlobalServiceCache struct { logger *slog.Logger mutex lock.RWMutex byName map[types.NamespacedName]*GlobalService - - // metricTotalGlobalServices is the gauge metric for total of global services - metricTotalGlobalServices metric.Gauge } -func NewGlobalServiceCache(logger *slog.Logger, metricTotalGlobalServices metric.Gauge) *GlobalServiceCache { +func NewGlobalServiceCache(logger *slog.Logger) *GlobalServiceCache { return &GlobalServiceCache{ - logger: logger, - byName: map[types.NamespacedName]*GlobalService{}, - metricTotalGlobalServices: metricTotalGlobalServices, + logger: logger, + byName: map[types.NamespacedName]*GlobalService{}, } } @@ -101,7 +96,6 @@ func (c *GlobalServiceCache) OnUpdate(svc *serviceStore.ClusterService) { logfields.ServiceName, svc, logfields.ClusterName, svc.Cluster, ) - c.metricTotalGlobalServices.Set(float64(len(c.byName))) } c.logger.Debug( @@ -137,7 +131,6 @@ func (c *GlobalServiceCache) delete(globalService *GlobalService, clusterName st logfields.ClusterName, clusterName, ) delete(c.byName, serviceNN) - c.metricTotalGlobalServices.Set(float64(len(c.byName))) } return true diff --git a/pkg/clustermesh/common/services_test.go b/pkg/clustermesh/common/services_test.go index 8ca99bb12dbe9..235c1c4b02966 100644 --- a/pkg/clustermesh/common/services_test.go +++ b/pkg/clustermesh/common/services_test.go @@ -10,7 +10,6 @@ import ( "github.com/stretchr/testify/require" serviceStore "github.com/cilium/cilium/pkg/clustermesh/store" - "github.com/cilium/cilium/pkg/metrics" ) type fakeUpstream struct { @@ -32,7 +31,7 @@ func TestRemoteServiceObserver(t *testing.T) { } svc1 := serviceStore.ClusterService{Cluster: "remote", Namespace: "namespace", Name: "name", IncludeExternal: false, Shared: true} svc2 := serviceStore.ClusterService{Cluster: "remote", Namespace: "namespace", Name: "name"} - cache := NewGlobalServiceCache(hivetest.Logger(t), metrics.NoOpGauge) + cache := 
NewGlobalServiceCache(hivetest.Logger(t)) var upstream fakeUpstream observer := NewSharedServicesObserver(hivetest.Logger(t), cache, upstream.OnUpdate, upstream.OnDelete) diff --git a/pkg/clustermesh/endpointslicesync/endpointslice_test.go b/pkg/clustermesh/endpointslicesync/endpointslice_test.go index d6e044d0c744b..6ad2055d286b0 100644 --- a/pkg/clustermesh/endpointslicesync/endpointslice_test.go +++ b/pkg/clustermesh/endpointslicesync/endpointslice_test.go @@ -31,7 +31,6 @@ import ( slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" "github.com/cilium/cilium/pkg/k8s/utils" "github.com/cilium/cilium/pkg/loadbalancer" - "github.com/cilium/cilium/pkg/metrics/metric" ) const ( @@ -113,7 +112,7 @@ func Test_meshEndpointSlice_Reconcile(t *testing.T) { } defer hive.Stop(tlog, context.Background()) - globalService := common.NewGlobalServiceCache(hivetest.Logger(t), metric.NewGauge(metric.GaugeOpts{})) + globalService := common.NewGlobalServiceCache(hivetest.Logger(t)) podInformer := newMeshPodInformer(logger, globalService) nodeInformer := newMeshNodeInformer(logger) controller, serviceInformer, endpointsliceInformer := newEndpointSliceMeshController( diff --git a/pkg/clustermesh/kvstoremesh/kvstoremesh.go b/pkg/clustermesh/kvstoremesh/kvstoremesh.go index 742b1f5329209..e783c509b1c6c 100644 --- a/pkg/clustermesh/kvstoremesh/kvstoremesh.go +++ b/pkg/clustermesh/kvstoremesh/kvstoremesh.go @@ -68,6 +68,8 @@ type KVStoreMesh struct { // clock allows to override the clock for testing purposes clock clock.Clock + + started chan struct{} } type params struct { @@ -97,6 +99,7 @@ func newKVStoreMesh(lc cell.Lifecycle, params params) *KVStoreMesh { storeFactory: params.StoreFactory, logger: params.Logger, clock: clock.RealClock{}, + started: make(chan struct{}), } km.common = common.NewClusterMesh(common.Configuration{ Logger: params.Logger, @@ -109,27 +112,39 @@ func newKVStoreMesh(lc cell.Lifecycle, params params) *KVStoreMesh { lc.Append(km.common) + // Needs to run after the "common" start hook, to signal that initialization + // successfully completed. + lc.Append(cell.Hook{ + OnStart: func(hc cell.HookContext) error { + close(km.started) + return nil + }, + }) + return &km } -type SyncWaiterParams struct { - KVStoreMesh *KVStoreMesh - SyncState syncstate.SyncState - Lifecycle cell.Lifecycle - JobGroup job.Group - Health cell.Health -} +// SyncWaiter wraps a SyncState to wait for KVStoreMesh synchronization, while +// allowing to force marking it as ready when necessary. +type SyncWaiter func() -func RegisterSyncWaiter(p SyncWaiterParams) { - syncedCallback := p.SyncState.WaitForResource() +func NewSyncWaiter(jg job.Group, km *KVStoreMesh, ss syncstate.SyncState) SyncWaiter { + done := ss.WaitForResource() - p.JobGroup.Add( + jg.Add( job.OneShot("kvstoremesh-sync-waiter", func(ctx context.Context, health cell.Health) error { - return p.KVStoreMesh.synced(ctx, syncedCallback) + return km.synced(ctx, func(ctx context.Context) { + done(ctx) + ss.Stop() + }) }), ) + + return func() { ss.Stop(); done(context.Background()) } } +func (sw SyncWaiter) ForceReady() { sw() } + func (km *KVStoreMesh) newRemoteCluster(name string, status common.StatusFunc) common.RemoteCluster { ctx, cancel := context.WithCancel(context.Background()) @@ -182,6 +197,12 @@ func (km *KVStoreMesh) newRemoteCluster(name string, status common.StatusFunc) c // timeout has been reached. The given syncCallback is always executed before // the function returns. 
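
The started channel introduced above acts as a start gate: the synced method that follows blocks until the gate is closed by the extra lifecycle hook, or gives up when the context is cancelled. A minimal sketch of the pattern, with illustrative names rather than the real KVStoreMesh fields:

package main

import (
	"context"
	"fmt"
	"time"
)

type component struct {
	started chan struct{}
}

func (c *component) waitSynced(ctx context.Context) error {
	// Block until start-up completed (the channel is closed exactly once by a
	// lifecycle hook) or the caller gives up.
	select {
	case <-c.started:
	case <-ctx.Done():
		return ctx.Err()
	}
	// ... the actual wait-for-synchronization logic would run here ...
	return nil
}

func main() {
	c := &component{started: make(chan struct{})}
	go func() {
		time.Sleep(10 * time.Millisecond)
		close(c.started) // what the added OnStart hook does once "common" has started
	}()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println(c.waitSynced(ctx)) // <nil>: the gate opened before the deadline
}
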
func (km *KVStoreMesh) synced(ctx context.Context, syncCallback func(context.Context)) error { + select { + case <-km.started: + case <-ctx.Done(): + return ctx.Err() + } + ctx, cancel := context.WithTimeout(ctx, km.config.GlobalReadyTimeout) defer func() { syncCallback(ctx) diff --git a/pkg/clustermesh/kvstoremesh/kvstoremesh_test.go b/pkg/clustermesh/kvstoremesh/kvstoremesh_test.go index d7c93d7c9132c..46d136528386b 100644 --- a/pkg/clustermesh/kvstoremesh/kvstoremesh_test.go +++ b/pkg/clustermesh/kvstoremesh/kvstoremesh_test.go @@ -748,10 +748,12 @@ func TestRemoteClusterSync(t *testing.T) { clusters: make(map[string]*remoteCluster), } km := KVStoreMesh{ - config: tt.config, - common: mockClusterMesh, - logger: hivetest.Logger(t), + config: tt.config, + common: mockClusterMesh, + logger: hivetest.Logger(t), + started: make(chan struct{}), } + close(km.started) rc := &remoteCluster{ name: "foo", diff --git a/pkg/clustermesh/mcsapi/metrics.go b/pkg/clustermesh/mcsapi/metrics.go index d836b7443a43a..466aa831507ed 100644 --- a/pkg/clustermesh/mcsapi/metrics.go +++ b/pkg/clustermesh/mcsapi/metrics.go @@ -32,6 +32,10 @@ func registerMCSAPICollector(registry *metrics.Registry, logger *slog.Logger, cl prometheus.BuildFQName(metrics.CiliumOperatorNamespace, "", "serviceimport_info"), "Information about ServiceImport in the local cluster", []string{"serviceimport", "namespace"}, nil), + serviceImportStatusCondition: prometheus.NewDesc( + prometheus.BuildFQName(metrics.CiliumOperatorNamespace, "", "serviceimport_status_condition"), + "Status Condition of ServiceImport in the local cluster", + []string{"serviceimport", "namespace", "condition", "status"}, nil), }) } @@ -42,12 +46,14 @@ type mcsAPICollector struct { serviceExportInfo *prometheus.Desc serviceExportStatusCondition *prometheus.Desc serviceImportInfo *prometheus.Desc + serviceImportStatusCondition *prometheus.Desc } func (c *mcsAPICollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.serviceExportInfo ch <- c.serviceExportStatusCondition ch <- c.serviceImportInfo + ch <- c.serviceImportStatusCondition } func (c *mcsAPICollector) Collect(ch chan<- prometheus.Metric) { @@ -103,5 +109,19 @@ func (c *mcsAPICollector) Collect(ch chan<- prometheus.Metric) { return } ch <- metric + for _, condition := range svcImport.Status.Conditions { + metric, err := prometheus.NewConstMetric( + c.serviceImportStatusCondition, + prometheus.GaugeValue, + 1, + svcImport.Name, svcImport.Namespace, + string(condition.Type), string(condition.Status), + ) + if err != nil { + c.logger.Error("Failed to generate ServiceImport metrics", logfields.Error, err) + return + } + ch <- metric + } } } diff --git a/pkg/clustermesh/mcsapi/serviceimport_controller.go b/pkg/clustermesh/mcsapi/serviceimport_controller.go index 2e892b2e46b94..efddf6853fecb 100644 --- a/pkg/clustermesh/mcsapi/serviceimport_controller.go +++ b/pkg/clustermesh/mcsapi/serviceimport_controller.go @@ -32,10 +32,6 @@ import ( "github.com/cilium/cilium/pkg/logging/logfields" ) -const ( - conditionTypeReady = "Ready" -) - // mcsAPIServiceImportReconciler is a controller that automatically creates // ServiceImport from ServiceExport (and their corresponding Services) from // remote clusters and the local cluster. It also handles ServiceExport status @@ -187,8 +183,8 @@ func checkPortConflict(port, olderPort portMerge) string { // mergePorts merge all the ports into a map while doing conflict resolution // with the oldest CreationTimestamp. 
It also return if it detects any conflict -func mergePorts(orderedSvcExports []*mcsapitypes.MCSAPIServiceSpec) ([]portMerge, string) { - conflict := "" +func mergePorts(orderedSvcExports []*mcsapitypes.MCSAPIServiceSpec) ([]portMerge, mcsapiv1alpha1.ServiceExportConditionReason, string) { + conflictMsg := "" ports := []portMerge{} portsByName := map[string]portMerge{} for _, svcExport := range orderedSvcExports { @@ -200,8 +196,8 @@ func mergePorts(orderedSvcExports []*mcsapitypes.MCSAPIServiceSpec) ([]portMerge } conflictDuplicatedPortName := checkDuplicatedPortNameConflict(portMergeValue, portsByName) - if conflict == "" { - conflict = conflictDuplicatedPortName + if conflictMsg == "" { + conflictMsg = conflictDuplicatedPortName } if conflictDuplicatedPortName != "" { continue @@ -216,12 +212,16 @@ func mergePorts(orderedSvcExports []*mcsapitypes.MCSAPIServiceSpec) ([]portMerge // exporting that port name portsByName[port.Name] = portMergeValue ports = append(ports, portMergeValue) - } else if conflict == "" { - conflict = checkPortConflict(portMergeValue, ports[portIndex]) + } else if conflictMsg == "" { + conflictMsg = checkPortConflict(portMergeValue, ports[portIndex]) } } } - return ports, conflict + reason := mcsapiv1alpha1.ServiceExportReasonNoConflicts + if conflictMsg != "" { + reason = mcsapiv1alpha1.ServiceExportReasonPortConflict + } + return ports, reason, conflictMsg } func mergedPortsToMCSPorts(mergedPorts []portMerge) []mcsapiv1alpha1.ServicePort { @@ -232,14 +232,14 @@ func mergedPortsToMCSPorts(mergedPorts []portMerge) []mcsapiv1alpha1.ServicePort return ports } -func getServiceImportStatus(svcExportByCluster operator.ServiceExportsByCluster) mcsapiv1alpha1.ServiceImportStatus { - clusters := []mcsapiv1alpha1.ClusterStatus{} +func getClustersStatus(svcExportByCluster operator.ServiceExportsByCluster) []mcsapiv1alpha1.ClusterStatus { + clusters := make([]mcsapiv1alpha1.ClusterStatus, 0, len(svcExportByCluster)) for _, cluster := range slices.Sorted(maps.Keys(svcExportByCluster)) { clusters = append(clusters, mcsapiv1alpha1.ClusterStatus{ Cluster: cluster, }) } - return mcsapiv1alpha1.ServiceImportStatus{Clusters: clusters} + return clusters } func derefSessionAffinity(sessionAffinityConfig *corev1.SessionAffinityConfig) *int32 { @@ -254,16 +254,18 @@ func derefSessionAffinity(sessionAffinityConfig *corev1.SessionAffinityConfig) * // checkConflictExport check if there are any conflict to be added on // the ServiceExport object. 
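
mergePorts above resolves duplicates in favor of the oldest export (the inputs are pre-sorted by CreationTimestamp) and now reports a typed conflict reason alongside the message. A simplified stand-alone sketch of that merge rule, with a stripped-down port type instead of the MCS-API ServicePort:

package main

import "fmt"

type port struct {
	Name     string
	Port     int32
	Protocol string
}

// mergePorts gives ownership of a port name to the first (oldest) export that
// defines it; later disagreements only produce a conflict message.
func mergePorts(ordered [][]port) ([]port, string) {
	conflictMsg := ""
	merged := []port{}
	byName := map[string]port{}
	for _, export := range ordered {
		for _, p := range export {
			prev, seen := byName[p.Name]
			if !seen {
				byName[p.Name] = p
				merged = append(merged, p)
				continue
			}
			if conflictMsg == "" && prev != p {
				conflictMsg = fmt.Sprintf("conflicting definitions for port %q", p.Name)
			}
		}
	}
	return merged, conflictMsg
}

func main() {
	oldest := []port{{Name: "http", Port: 80, Protocol: "TCP"}}
	newer := []port{{Name: "http", Port: 8080, Protocol: "TCP"}, {Name: "metrics", Port: 9090, Protocol: "TCP"}}
	ports, conflict := mergePorts([][]port{oldest, newer})
	fmt.Println(ports)    // http:80 (from the oldest export) plus metrics:9090
	fmt.Println(conflict) // conflicting definitions for port "http"
}
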
This function does not check for conflict on the // ports field this aspect should be done by mergePorts -func checkConflictExport(orderedSvcExports []*mcsapitypes.MCSAPIServiceSpec) string { +func checkConflictExport(orderedSvcExports []*mcsapitypes.MCSAPIServiceSpec) (mcsapiv1alpha1.ServiceExportConditionReason, string) { clusterCount := len(orderedSvcExports) fieldStructs := []struct { name string + reason mcsapiv1alpha1.ServiceExportConditionReason getterFunc func(svcSpec *mcsapitypes.MCSAPIServiceSpec) string equalFunc func(svc1, svc2 *mcsapitypes.MCSAPIServiceSpec) bool }{ { - name: "type", + name: "type", + reason: mcsapiv1alpha1.ServiceExportReasonTypeConflict, getterFunc: func(svcSpec *mcsapitypes.MCSAPIServiceSpec) string { return string(svcSpec.Type) }, @@ -272,7 +274,8 @@ func checkConflictExport(orderedSvcExports []*mcsapitypes.MCSAPIServiceSpec) str }, }, { - name: "sessionAffinity", + name: "sessionAffinity", + reason: mcsapiv1alpha1.ServiceExportReasonSessionAffinityConflict, getterFunc: func(svcSpec *mcsapitypes.MCSAPIServiceSpec) string { return string(svcSpec.SessionAffinity) }, @@ -281,7 +284,8 @@ func checkConflictExport(orderedSvcExports []*mcsapitypes.MCSAPIServiceSpec) str }, }, { - name: "sessionAffinityConfig.clientIP", + name: "sessionAffinityConfig.clientIP", + reason: mcsapiv1alpha1.ServiceExportReasonSessionAffinityConfigConflict, getterFunc: func(svcSpec *mcsapitypes.MCSAPIServiceSpec) string { timeoutSeconds := derefSessionAffinity(svcSpec.SessionAffinityConfig) if timeoutSeconds == nil { @@ -294,7 +298,8 @@ func checkConflictExport(orderedSvcExports []*mcsapitypes.MCSAPIServiceSpec) str }, }, { - name: "annotations", + name: "annotations", + reason: mcsapiv1alpha1.ServiceExportReasonAnnotationsConflict, getterFunc: func(svcSpec *mcsapitypes.MCSAPIServiceSpec) string { return fmt.Sprintf("%v", svcSpec.Annotations) }, @@ -303,7 +308,8 @@ func checkConflictExport(orderedSvcExports []*mcsapitypes.MCSAPIServiceSpec) str }, }, { - name: "labels", + name: "labels", + reason: mcsapiv1alpha1.ServiceExportReasonLabelsConflict, getterFunc: func(svcSpec *mcsapitypes.MCSAPIServiceSpec) string { return fmt.Sprintf("%v", svcSpec.Labels) }, @@ -329,7 +335,7 @@ func checkConflictExport(orderedSvcExports []*mcsapitypes.MCSAPIServiceSpec) str } } - return fmt.Sprintf( + return fieldStruct.reason, fmt.Sprintf( "Conflicting %s. %d/%d clusters disagree. 
Using \"%s\" from oldest service export in cluster \"%s\".", fieldStruct.name, conflictCount, clusterCount, fieldStruct.getterFunc(orderedSvcExports[0]), @@ -338,7 +344,23 @@ func checkConflictExport(orderedSvcExports []*mcsapitypes.MCSAPIServiceSpec) str } } - return "" + return mcsapiv1alpha1.ServiceExportReasonNoConflicts, "" +} + +func setInvalidStatus(conditions *[]metav1.Condition, reason mcsapiv1alpha1.ServiceExportConditionReason, msg string) bool { + changed := meta.SetStatusCondition(conditions, mcsapiv1alpha1.NewServiceExportCondition( + mcsapiv1alpha1.ServiceExportConditionValid, + metav1.ConditionFalse, + reason, + msg, + )) + changed = meta.SetStatusCondition(conditions, mcsapiv1alpha1.NewServiceExportCondition( + mcsapiv1alpha1.ServiceExportConditionReady, + metav1.ConditionFalse, + mcsapiv1alpha1.ServiceExportReasonPending, + "ServiceExport is not valid", + )) || changed + return meta.RemoveStatusCondition(conditions, string(mcsapiv1alpha1.ServiceExportConditionConflict)) || changed } func (r *mcsAPIServiceImportReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { @@ -373,14 +395,11 @@ func (r *mcsAPIServiceImportReconciler) Reconcile(ctx context.Context, req ctrl. return controllerruntime.Fail(err) } if localSvc == nil { - if meta.SetStatusCondition(&svcExport.Status.Conditions, metav1.Condition{ - Type: mcsapiv1alpha1.ServiceExportValid, - Status: metav1.ConditionFalse, - Reason: "NoService", - Message: "Service doesn't exist", - }) { - meta.RemoveStatusCondition(&svcExport.Status.Conditions, mcsapiv1alpha1.ServiceExportConflict) - meta.RemoveStatusCondition(&svcExport.Status.Conditions, conditionTypeReady) + if setInvalidStatus( + &svcExport.Status.Conditions, + mcsapiv1alpha1.ServiceExportReasonNoService, + "Service doesn't exist", + ) { if err := r.Client.Status().Update(ctx, svcExport); err != nil { return controllerruntime.Fail(err) } @@ -394,14 +413,11 @@ func (r *mcsAPIServiceImportReconciler) Reconcile(ctx context.Context, req ctrl. svcExportByCluster[r.cluster] = localSvcSpec if localSvc.Spec.Type == corev1.ServiceTypeExternalName { - if meta.SetStatusCondition(&svcExport.Status.Conditions, metav1.Condition{ - Type: mcsapiv1alpha1.ServiceExportValid, - Status: metav1.ConditionFalse, - Reason: "ServiceType", - Message: "Service type ExternalName is not supported", - }) { - meta.RemoveStatusCondition(&svcExport.Status.Conditions, mcsapiv1alpha1.ServiceExportConflict) - meta.RemoveStatusCondition(&svcExport.Status.Conditions, conditionTypeReady) + if setInvalidStatus( + &svcExport.Status.Conditions, + mcsapiv1alpha1.ServiceExportReasonInvalidServiceType, + "Service type ExternalName is not supported", + ) { if err := r.Client.Status().Update(ctx, svcExport); err != nil { return controllerruntime.Fail(err) } @@ -411,37 +427,40 @@ func (r *mcsAPIServiceImportReconciler) Reconcile(ctx context.Context, req ctrl. 
} orderedSvcExports := orderSvcExportByPriority(svcExportByCluster) - mergedPorts, conflictMsg := mergePorts(orderedSvcExports) + mergedPorts, conflictReason, conflictMsg := mergePorts(orderedSvcExports) + if conflictReason == mcsapiv1alpha1.ServiceExportReasonNoConflicts { + conflictReason, conflictMsg = checkConflictExport(orderedSvcExports) + } if svcExport != nil { - changedCondition := meta.SetStatusCondition(&svcExport.Status.Conditions, metav1.Condition{ - Type: mcsapiv1alpha1.ServiceExportValid, - Status: metav1.ConditionTrue, - Reason: mcsapiv1alpha1.ServiceExportValid, - Message: "Service is Valid for export", - }) - - if conflictMsg == "" { - conflictMsg = checkConflictExport(orderedSvcExports) + changedCondition := meta.SetStatusCondition(&svcExport.Status.Conditions, mcsapiv1alpha1.NewServiceExportCondition( + mcsapiv1alpha1.ServiceExportConditionValid, + metav1.ConditionTrue, + mcsapiv1alpha1.ServiceExportReasonValid, + "ServiceExport is valid", + )) + changedCondition = meta.SetStatusCondition(&svcExport.Status.Conditions, mcsapiv1alpha1.NewServiceExportCondition( + mcsapiv1alpha1.ServiceExportConditionReady, + metav1.ConditionTrue, + mcsapiv1alpha1.ServiceExportReasonReady, + "ServiceExport is ready", + )) || changedCondition + + if conflictReason != mcsapiv1alpha1.ServiceExportReasonNoConflicts { + changedCondition = meta.SetStatusCondition(&svcExport.Status.Conditions, mcsapiv1alpha1.NewServiceExportCondition( + mcsapiv1alpha1.ServiceExportConditionConflict, + metav1.ConditionTrue, + conflictReason, + conflictMsg, + )) || changedCondition + } else { + changedCondition = meta.SetStatusCondition(&svcExport.Status.Conditions, mcsapiv1alpha1.NewServiceExportCondition( + mcsapiv1alpha1.ServiceExportConditionConflict, + metav1.ConditionFalse, + mcsapiv1alpha1.ServiceExportReasonNoConflicts, + "ServiceExport has no conflicts", + )) || changedCondition } - if conflictMsg != "" { - changedCondition = meta.SetStatusCondition(&svcExport.Status.Conditions, metav1.Condition{ - Type: mcsapiv1alpha1.ServiceExportConflict, - Status: metav1.ConditionTrue, - Reason: mcsapiv1alpha1.ServiceExportConflict, - Message: conflictMsg, - }) || changedCondition - } - readyStatus := metav1.ConditionFalse - if conflictMsg == "" { - readyStatus = metav1.ConditionTrue - changedCondition = meta.RemoveStatusCondition(&svcExport.Status.Conditions, mcsapiv1alpha1.ServiceExportConflict) || changedCondition - } - changedCondition = meta.SetStatusCondition(&svcExport.Status.Conditions, metav1.Condition{ - Type: conditionTypeReady, - Status: readyStatus, - Reason: conditionTypeReady, - }) || changedCondition if changedCondition { if err := r.Client.Status().Update(ctx, svcExport); err != nil { return controllerruntime.Fail(err) @@ -456,7 +475,8 @@ func (r *mcsAPIServiceImportReconciler) Reconcile(ctx context.Context, req ctrl. svcImport.Spec.SessionAffinityConfig = oldestClusterSvc.SessionAffinityConfig.DeepCopy() svcImport.Labels = maps.Clone(oldestClusterSvc.Labels) annotations := maps.Clone(oldestClusterSvc.Annotations) - if _, ok := svcImport.Annotations[mcsapicontrollers.DerivedServiceAnnotation]; ok { + _, derivedSvcAnnotationExists := svcImport.Annotations[mcsapicontrollers.DerivedServiceAnnotation] + if derivedSvcAnnotationExists { if annotations == nil { annotations = map[string]string{} } @@ -469,9 +489,24 @@ func (r *mcsAPIServiceImportReconciler) Reconcile(ctx context.Context, req ctrl. 
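
The reconciler above now maintains three conditions on a ServiceExport: Valid, Ready, and an always-present Conflict whose status and reason reflect the outcome of mergePorts and checkConflictExport. A simplified sketch of that triplet using the upstream apimachinery helpers; the condition type and reason strings here are plain literals for illustration, not the mcsapiv1alpha1 constants:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func setExportConditions(conds *[]metav1.Condition, conflictReason, conflictMsg string) bool {
	changed := meta.SetStatusCondition(conds, metav1.Condition{
		Type:    "Valid",
		Status:  metav1.ConditionTrue,
		Reason:  "Valid",
		Message: "ServiceExport is valid",
	})
	changed = meta.SetStatusCondition(conds, metav1.Condition{
		Type:    "Ready",
		Status:  metav1.ConditionTrue,
		Reason:  "Ready",
		Message: "ServiceExport is ready",
	}) || changed

	// Conflict is always present; only its status, reason and message change.
	status, reason, msg := metav1.ConditionFalse, "NoConflicts", "ServiceExport has no conflicts"
	if conflictReason != "" {
		status, reason, msg = metav1.ConditionTrue, conflictReason, conflictMsg
	}
	return meta.SetStatusCondition(conds, metav1.Condition{
		Type:    "Conflict",
		Status:  status,
		Reason:  reason,
		Message: msg,
	}) || changed
}

func main() {
	var conds []metav1.Condition
	fmt.Println(setExportConditions(&conds, "", ""))                    // true: all three conditions created
	fmt.Println(meta.IsStatusConditionFalse(conds, "Conflict"))         // true
	fmt.Println(setExportConditions(&conds, "PortConflict", "port 80")) // true: Conflict flipped to True
	fmt.Println(meta.IsStatusConditionTrue(conds, "Conflict"))          // true
}
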
return controllerruntime.Fail(err) } - newStatus := getServiceImportStatus(svcExportByCluster) - if !reflect.DeepEqual(svcImport.Status, newStatus) { - svcImport.Status = newStatus + svcImportStatusOriginal := svcImport.Status.DeepCopy() + svcImport.Status.Clusters = getClustersStatus(svcExportByCluster) + if derivedSvcAnnotationExists { + meta.SetStatusCondition(&svcImport.Status.Conditions, mcsapiv1alpha1.NewServiceImportCondition( + mcsapiv1alpha1.ServiceImportConditionReady, + metav1.ConditionTrue, + mcsapiv1alpha1.ServiceImportReasonReady, + "ServiceImport is ready", + )) + } else { + meta.SetStatusCondition(&svcImport.Status.Conditions, mcsapiv1alpha1.NewServiceImportCondition( + mcsapiv1alpha1.ServiceImportConditionReady, + metav1.ConditionFalse, + mcsapiv1alpha1.ServiceImportReasonPending, + "Waiting for the derived Service to be created", + )) + } + if !reflect.DeepEqual(svcImportStatusOriginal, svcImport.Status) { if err := r.Client.Status().Update(ctx, svcImport); err != nil { return controllerruntime.Fail(err) } diff --git a/pkg/clustermesh/mcsapi/serviceimport_controller_test.go b/pkg/clustermesh/mcsapi/serviceimport_controller_test.go index fe16dca8693b7..195609fce28a6 100644 --- a/pkg/clustermesh/mcsapi/serviceimport_controller_test.go +++ b/pkg/clustermesh/mcsapi/serviceimport_controller_test.go @@ -5,7 +5,6 @@ package mcsapi import ( "context" - "fmt" "maps" "testing" "time" @@ -26,7 +25,6 @@ import ( mcsapitypes "github.com/cilium/cilium/pkg/clustermesh/mcsapi/types" "github.com/cilium/cilium/pkg/clustermesh/operator" - "github.com/cilium/cilium/pkg/metrics/metric" ) const ( @@ -503,7 +501,7 @@ func Test_mcsServiceImport_Reconcile(t *testing.T) { WithStatusSubresource(&mcsapiv1alpha1.ServiceImport{}). WithScheme(testScheme()). Build() - globalServiceExports := operator.NewGlobalServiceExportCache(metric.NewGauge(metric.GaugeOpts{})) + globalServiceExports := operator.NewGlobalServiceExportCache() remoteClusterServiceSource := &remoteClusterServiceExportSource{Logger: hivetest.Logger(t)} for _, svcExport := range remoteSvcImportTestFixtures { globalServiceExports.OnUpdate(svcExport) @@ -541,13 +539,14 @@ func Test_mcsServiceImport_Reconcile(t *testing.T) { SessionAffinityConfig: nil, }, svcImport.Spec) require.Len(t, svcImport.Status.Clusters, 1) + require.True(t, meta.IsStatusConditionFalse(svcImport.Status.Conditions, string(mcsapiv1alpha1.ServiceImportConditionReady))) svcExport, err := getServiceExport(c, key) require.NoError(t, err) require.NotNil(t, svcExport) - require.True(t, meta.IsStatusConditionTrue(svcExport.Status.Conditions, conditionTypeReady)) - require.True(t, meta.IsStatusConditionTrue(svcExport.Status.Conditions, mcsapiv1alpha1.ServiceExportValid)) - require.Nil(t, meta.FindStatusCondition(svcExport.Status.Conditions, mcsapiv1alpha1.ServiceExportConflict)) + require.True(t, meta.IsStatusConditionTrue(svcExport.Status.Conditions, string(mcsapiv1alpha1.ServiceExportConditionReady))) + require.True(t, meta.IsStatusConditionTrue(svcExport.Status.Conditions, string(mcsapiv1alpha1.ServiceExportConditionValid))) + require.True(t, meta.IsStatusConditionFalse(svcExport.Status.Conditions, string(mcsapiv1alpha1.ServiceExportConditionConflict))) }) t.Run("Service import creation with remote-only", func(t *testing.T) { @@ -566,6 +565,7 @@ func Test_mcsServiceImport_Reconcile(t *testing.T) { require.NoError(t, err) require.NotNil(t, svcImport) require.Len(t, svcImport.Status.Clusters, 1) + require.True(t, meta.IsStatusConditionFalse(svcImport.Status.Conditions, 
string(mcsapiv1alpha1.ServiceImportConditionReady))) require.Equal(t, remoteClusterName, svcImport.Status.Clusters[0].Cluster) }) @@ -636,9 +636,9 @@ func Test_mcsServiceImport_Reconcile(t *testing.T) { svcExport, err := getServiceExport(c, key) require.NoError(t, err) require.NotNil(t, svcExport) - require.True(t, meta.IsStatusConditionTrue(svcExport.Status.Conditions, conditionTypeReady)) - require.True(t, meta.IsStatusConditionTrue(svcExport.Status.Conditions, mcsapiv1alpha1.ServiceExportValid)) - require.Nil(t, meta.FindStatusCondition(svcExport.Status.Conditions, mcsapiv1alpha1.ServiceExportConflict)) + require.True(t, meta.IsStatusConditionTrue(svcExport.Status.Conditions, string(mcsapiv1alpha1.ServiceExportConditionReady))) + require.True(t, meta.IsStatusConditionTrue(svcExport.Status.Conditions, string(mcsapiv1alpha1.ServiceExportConditionValid))) + require.True(t, meta.IsStatusConditionFalse(svcExport.Status.Conditions, string(mcsapiv1alpha1.ServiceExportConditionConflict))) }) t.Run("Delete local service test", func(t *testing.T) { @@ -723,6 +723,7 @@ func Test_mcsServiceImport_Reconcile(t *testing.T) { require.NoError(t, err) require.NotNil(t, svcImport) require.Len(t, svcImport.Status.Clusters, 1) + require.True(t, meta.IsStatusConditionFalse(svcImport.Status.Conditions, string(mcsapiv1alpha1.ServiceImportConditionReady))) require.Equal(t, remoteClusterName, svcImport.Status.Clusters[0].Cluster) globalServiceExports.OnUpdate(&mcsapitypes.MCSAPIServiceSpec{ @@ -773,7 +774,6 @@ func Test_mcsServiceImport_Reconcile(t *testing.T) { require.True(t, maps.Equal(svcImport.Labels, map[string]string{ "exported-label": "", })) - fmt.Println(svcImport.Annotations) require.True(t, maps.Equal(svcImport.Annotations, map[string]string{ mcsapicontrollers.DerivedServiceAnnotation: "", "exported-annotation": "", @@ -801,9 +801,9 @@ func Test_mcsServiceImport_Reconcile(t *testing.T) { require.NoError(t, err) require.NotNil(t, svcExport) - require.True(t, meta.IsStatusConditionFalse(svcExport.Status.Conditions, conditionTypeReady)) - require.True(t, meta.IsStatusConditionTrue(svcExport.Status.Conditions, mcsapiv1alpha1.ServiceExportValid)) - require.True(t, meta.IsStatusConditionTrue(svcExport.Status.Conditions, mcsapiv1alpha1.ServiceExportConflict)) + require.True(t, meta.IsStatusConditionTrue(svcExport.Status.Conditions, string(mcsapiv1alpha1.ServiceExportConditionReady))) + require.True(t, meta.IsStatusConditionTrue(svcExport.Status.Conditions, string(mcsapiv1alpha1.ServiceExportConditionValid))) + require.True(t, meta.IsStatusConditionTrue(svcExport.Status.Conditions, string(mcsapiv1alpha1.ServiceExportConditionConflict))) globalServiceExports.OnUpdate(&mcsapitypes.MCSAPIServiceSpec{ Cluster: remoteClusterName, @@ -830,15 +830,16 @@ func Test_mcsServiceImport_Reconcile(t *testing.T) { require.NoError(t, err) require.NotNil(t, svcExport) - require.True(t, meta.IsStatusConditionTrue(svcExport.Status.Conditions, conditionTypeReady)) - require.True(t, meta.IsStatusConditionTrue(svcExport.Status.Conditions, mcsapiv1alpha1.ServiceExportValid)) - require.Nil(t, meta.FindStatusCondition(svcExport.Status.Conditions, mcsapiv1alpha1.ServiceExportConflict)) + require.True(t, meta.IsStatusConditionTrue(svcExport.Status.Conditions, string(mcsapiv1alpha1.ServiceExportConditionReady))) + require.True(t, meta.IsStatusConditionTrue(svcExport.Status.Conditions, string(mcsapiv1alpha1.ServiceExportConditionValid))) + require.True(t, meta.IsStatusConditionFalse(svcExport.Status.Conditions, 
string(mcsapiv1alpha1.ServiceExportConditionConflict))) }) conflictTests := []struct { name string remoteSvcImportValid func(*mcsapiv1alpha1.ServiceImport) bool localSvcImportValid func(*mcsapiv1alpha1.ServiceImport) bool + assertReason mcsapiv1alpha1.ServiceExportConditionReason assertMsgInclude string }{ { @@ -849,6 +850,7 @@ func Test_mcsServiceImport_Reconcile(t *testing.T) { localSvcImportValid: func(svcImport *mcsapiv1alpha1.ServiceImport) bool { return svcImport.Spec.Type == mcsapiv1alpha1.ClusterSetIP }, + assertReason: mcsapiv1alpha1.ServiceExportReasonTypeConflict, assertMsgInclude: "1/2 clusters disagree", }, { @@ -859,6 +861,7 @@ func Test_mcsServiceImport_Reconcile(t *testing.T) { localSvcImportValid: func(svcImport *mcsapiv1alpha1.ServiceImport) bool { return len(svcImport.Spec.Ports) == 1 && svcImport.Spec.Ports[0].Name == "" }, + assertReason: mcsapiv1alpha1.ServiceExportReasonPortConflict, }, { name: "conflict-port-appprotocol", @@ -868,6 +871,7 @@ func Test_mcsServiceImport_Reconcile(t *testing.T) { localSvcImportValid: func(svcImport *mcsapiv1alpha1.ServiceImport) bool { return len(svcImport.Spec.Ports) == 1 && ptr.Deref(svcImport.Spec.Ports[0].AppProtocol, "") == "" }, + assertReason: mcsapiv1alpha1.ServiceExportReasonPortConflict, }, { name: "conflict-duplicated-port-name", @@ -877,6 +881,7 @@ func Test_mcsServiceImport_Reconcile(t *testing.T) { localSvcImportValid: func(svcImport *mcsapiv1alpha1.ServiceImport) bool { return len(svcImport.Spec.Ports) == 1 && svcImport.Spec.Ports[0].Port == 4242 }, + assertReason: mcsapiv1alpha1.ServiceExportReasonPortConflict, }, { name: "conflict-session-affinity", @@ -886,6 +891,7 @@ func Test_mcsServiceImport_Reconcile(t *testing.T) { localSvcImportValid: func(svcImport *mcsapiv1alpha1.ServiceImport) bool { return svcImport.Spec.SessionAffinity == corev1.ServiceAffinityNone }, + assertReason: mcsapiv1alpha1.ServiceExportReasonSessionAffinityConflict, }, { name: "conflict-session-affinity-config", @@ -895,6 +901,7 @@ func Test_mcsServiceImport_Reconcile(t *testing.T) { localSvcImportValid: func(svcImport *mcsapiv1alpha1.ServiceImport) bool { return *svcImport.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds == 4242 }, + assertReason: mcsapiv1alpha1.ServiceExportReasonSessionAffinityConfigConflict, }, { name: "conflict-annotations", @@ -909,6 +916,7 @@ func Test_mcsServiceImport_Reconcile(t *testing.T) { "service.cilium.io/global-sync-endpoint-slices": "true", }) }, + assertReason: mcsapiv1alpha1.ServiceExportReasonAnnotationsConflict, }, { name: "conflict-labels", @@ -923,6 +931,7 @@ func Test_mcsServiceImport_Reconcile(t *testing.T) { "my-label": "test", }) }, + assertReason: mcsapiv1alpha1.ServiceExportReasonLabelsConflict, }, } @@ -948,14 +957,15 @@ func Test_mcsServiceImport_Reconcile(t *testing.T) { require.NoError(t, err) require.NotNil(t, svcExport) - require.True(t, meta.IsStatusConditionFalse(svcExport.Status.Conditions, conditionTypeReady)) - require.True(t, meta.IsStatusConditionTrue(svcExport.Status.Conditions, mcsapiv1alpha1.ServiceExportValid)) - require.True(t, meta.IsStatusConditionTrue(svcExport.Status.Conditions, mcsapiv1alpha1.ServiceExportConflict)) + require.True(t, meta.IsStatusConditionTrue(svcExport.Status.Conditions, string(mcsapiv1alpha1.ServiceExportConditionReady))) + require.True(t, meta.IsStatusConditionTrue(svcExport.Status.Conditions, string(mcsapiv1alpha1.ServiceExportConditionValid))) + require.True(t, meta.IsStatusConditionTrue(svcExport.Status.Conditions, 
string(mcsapiv1alpha1.ServiceExportConditionConflict))) if conflictTest.assertMsgInclude != "" { - condition := meta.FindStatusCondition(svcExport.Status.Conditions, mcsapiv1alpha1.ServiceExportConflict) + condition := meta.FindStatusCondition(svcExport.Status.Conditions, string(mcsapiv1alpha1.ServiceExportConditionConflict)) require.NotNil(t, condition) require.Contains(t, condition.Message, conflictTest.assertMsgInclude) + require.Equal(t, string(conflictTest.assertReason), condition.Reason) } }) } @@ -986,9 +996,16 @@ func Test_mcsServiceImport_Reconcile(t *testing.T) { require.NoError(t, err) require.NotNil(t, svcExport) - require.True(t, meta.IsStatusConditionFalse(svcExport.Status.Conditions, conditionTypeReady)) - require.True(t, meta.IsStatusConditionTrue(svcExport.Status.Conditions, mcsapiv1alpha1.ServiceExportValid)) - require.True(t, meta.IsStatusConditionTrue(svcExport.Status.Conditions, mcsapiv1alpha1.ServiceExportConflict)) + require.True(t, meta.IsStatusConditionTrue(svcExport.Status.Conditions, string(mcsapiv1alpha1.ServiceExportConditionReady))) + require.True(t, meta.IsStatusConditionTrue(svcExport.Status.Conditions, string(mcsapiv1alpha1.ServiceExportConditionValid))) + require.True(t, meta.IsStatusConditionTrue(svcExport.Status.Conditions, string(mcsapiv1alpha1.ServiceExportConditionConflict))) + + if conflictTest.assertMsgInclude != "" { + condition := meta.FindStatusCondition(svcExport.Status.Conditions, string(mcsapiv1alpha1.ServiceExportConditionConflict)) + require.NotNil(t, condition) + require.Contains(t, condition.Message, conflictTest.assertMsgInclude) + require.Equal(t, string(conflictTest.assertReason), condition.Reason) + } }) } } diff --git a/pkg/clustermesh/metrics.go b/pkg/clustermesh/metrics.go index 2a5efac2bb7a3..b5227d2af091e 100644 --- a/pkg/clustermesh/metrics.go +++ b/pkg/clustermesh/metrics.go @@ -9,11 +9,14 @@ import ( ) type Metrics struct { - // TotalNodes tracks the number of total nodes in a remote cluster. + // TotalNodes tracks the number of total nodes per remote cluster. TotalNodes metric.Vec[metric.Gauge] - // TotalGlobalServices tracks the total number of global services. - TotalGlobalServices metric.Vec[metric.Gauge] + // TotalServices tracks the number of total services per remote cluster. + TotalServices metric.Vec[metric.Gauge] + + // TotalEndpoints tracks the number of total IPs per remote cluster. 
+ TotalEndpoints metric.Vec[metric.Gauge] } func NewMetrics() Metrics { @@ -26,12 +29,20 @@ func NewMetrics() Metrics { Help: "The total number of nodes in the remote cluster", }, []string{metrics.LabelSourceCluster, metrics.LabelSourceNodeName, metrics.LabelTargetCluster}), - TotalGlobalServices: metric.NewGaugeVec(metric.GaugeOpts{ - ConfigName: metrics.Namespace + "_" + subsystem + "_global_services", + TotalServices: metric.NewGaugeVec(metric.GaugeOpts{ + ConfigName: metrics.Namespace + "_" + subsystem + "_remote_cluster_services", + Namespace: metrics.Namespace, + Subsystem: subsystem, + Name: "remote_cluster_services", + Help: "The total number of services in the remote cluster", + }, []string{metrics.LabelSourceCluster, metrics.LabelSourceNodeName, metrics.LabelTargetCluster}), + + TotalEndpoints: metric.NewGaugeVec(metric.GaugeOpts{ + ConfigName: metrics.Namespace + "_" + subsystem + "_remote_cluster_endpoints", Namespace: metrics.Namespace, Subsystem: subsystem, - Name: "global_services", - Help: "The total number of global services in the cluster mesh", - }, []string{metrics.LabelSourceCluster, metrics.LabelSourceNodeName}), + Name: "remote_cluster_endpoints", + Help: "The total number of endpoints in the remote cluster", + }, []string{metrics.LabelSourceCluster, metrics.LabelSourceNodeName, metrics.LabelTargetCluster}), } } diff --git a/pkg/clustermesh/operator/clustermesh.go b/pkg/clustermesh/operator/clustermesh.go index c289da830d62e..e4d25ee66d11e 100644 --- a/pkg/clustermesh/operator/clustermesh.go +++ b/pkg/clustermesh/operator/clustermesh.go @@ -18,6 +18,7 @@ import ( "github.com/cilium/cilium/pkg/clustermesh/common" mcsapitypes "github.com/cilium/cilium/pkg/clustermesh/mcsapi/types" serviceStore "github.com/cilium/cilium/pkg/clustermesh/store" + "github.com/cilium/cilium/pkg/clustermesh/types" "github.com/cilium/cilium/pkg/clustermesh/wait" "github.com/cilium/cilium/pkg/dial" "github.com/cilium/cilium/pkg/kvstore/store" @@ -29,10 +30,11 @@ type clusterMesh struct { // common implements the common logic to connect to remote clusters. common common.ClusterMesh - cfg ClusterMeshConfig - cfgMCSAPI MCSAPIConfig - logger *slog.Logger - Metrics Metrics + cfg ClusterMeshConfig + cfgMCSAPI MCSAPIConfig + logger *slog.Logger + clusterInfo types.ClusterInfo + metrics Metrics // globalServices is a list of all global services. The datastructure // is protected by its own mutex inside the structure. 
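
The new gauges above are per remote cluster rather than a single global count: one time series per source cluster, source node and target cluster, kept up to date through RWSWithEntriesMetric as the remote watch stores change. A rough sketch of the resulting metric shape, using the plain Prometheus client for brevity and assumed label names (the real code goes through Cilium's metric package and its label constants):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	remoteServices := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "cilium",
		Subsystem: "clustermesh",
		Name:      "remote_cluster_services",
		Help:      "The total number of services in the remote cluster",
	}, []string{"source_cluster", "source_node_name", "target_cluster"})

	reg := prometheus.NewRegistry()
	reg.MustRegister(remoteServices)

	// One time series per remote cluster, set to the number of entries
	// currently present in that cluster's watch store.
	remoteServices.WithLabelValues("cluster1", "node-1", "cluster2").Set(12)
	remoteServices.WithLabelValues("cluster1", "node-1", "cluster3").Set(7)

	mfs, _ := reg.Gather()
	fmt.Println(len(mfs[0].GetMetric())) // 2 series, one per remote cluster
}
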
@@ -97,15 +99,15 @@ func newClusterMesh(lc cell.Lifecycle, params clusterMeshParams) (*clusterMesh, params.Logger.Info("Operator ClusterMesh component enabled") cm := clusterMesh{ - cfg: params.Cfg, - cfgMCSAPI: params.CfgMCSAPI, - logger: params.Logger, - globalServices: common.NewGlobalServiceCache(params.Logger, params.Metrics.TotalGlobalServices.WithLabelValues(params.ClusterInfo.Name)), - globalServiceExports: NewGlobalServiceExportCache( - params.Metrics.TotalGlobalServiceExports.WithLabelValues(params.ClusterInfo.Name), - ), - storeFactory: params.StoreFactory, - syncTimeoutConfig: params.TimeoutConfig, + cfg: params.Cfg, + cfgMCSAPI: params.CfgMCSAPI, + logger: params.Logger, + clusterInfo: params.ClusterInfo, + metrics: params.Metrics, + globalServices: common.NewGlobalServiceCache(params.Logger), + globalServiceExports: NewGlobalServiceExportCache(), + storeFactory: params.StoreFactory, + syncTimeoutConfig: params.TimeoutConfig, } cm.common = common.NewClusterMesh(common.Configuration{ Logger: params.Logger, @@ -223,6 +225,7 @@ func (cm *clusterMesh) newRemoteCluster(name string, status common.StatusFunc) c }, ), store.RWSWithOnSyncCallback(func(ctx context.Context) { rc.synced.services.Stop() }), + store.RWSWithEntriesMetric(cm.metrics.TotalServices.WithLabelValues(cm.clusterInfo.Name, rc.name)), ) rc.remoteServiceExports = cm.storeFactory.NewWatchStore( @@ -245,6 +248,7 @@ func (cm *clusterMesh) newRemoteCluster(name string, status common.StatusFunc) c }, ), store.RWSWithOnSyncCallback(func(ctx context.Context) { rc.synced.serviceExports.Stop() }), + store.RWSWithEntriesMetric(cm.metrics.TotalServiceExports.WithLabelValues(cm.clusterInfo.Name, name)), ) return rc diff --git a/pkg/clustermesh/operator/metrics.go b/pkg/clustermesh/operator/metrics.go index 5bd266919bf36..9432e55fdbf95 100644 --- a/pkg/clustermesh/operator/metrics.go +++ b/pkg/clustermesh/operator/metrics.go @@ -9,25 +9,25 @@ import ( ) type Metrics struct { - // TotalGlobalServices tracks the total number of global services. - TotalGlobalServices metric.Vec[metric.Gauge] - // TotalGlobalServiceExports tracks the total number of global service exports. - TotalGlobalServiceExports metric.Vec[metric.Gauge] + // TotalServices tracks the number of total global services per remote cluster. + TotalServices metric.Vec[metric.Gauge] + // TotalServiceExports tracks the number of total MCS-API service exports per remote cluster. 
+ TotalServiceExports metric.Vec[metric.Gauge] } func NewMetrics() Metrics { return Metrics{ - TotalGlobalServices: metric.NewGaugeVec(metric.GaugeOpts{ + TotalServices: metric.NewGaugeVec(metric.GaugeOpts{ Namespace: metrics.CiliumOperatorNamespace, Subsystem: subsystem, - Name: "global_services", - Help: "The total number of global services in the cluster mesh", - }, []string{metrics.LabelSourceCluster}), - TotalGlobalServiceExports: metric.NewGaugeVec(metric.GaugeOpts{ + Name: "remote_cluster_services", + Help: "The total number of services in the remote cluster", + }, []string{metrics.LabelSourceCluster, metrics.LabelTargetCluster}), + TotalServiceExports: metric.NewGaugeVec(metric.GaugeOpts{ Namespace: metrics.CiliumOperatorNamespace, Subsystem: subsystem, - Name: "global_service_exports", - Help: "The total number of MCS-API global service exports in the cluster mesh", - }, []string{metrics.LabelSourceCluster}), + Name: "remote_cluster_service_exports", + Help: "The total number of MCS-API service exports in the remote cluster", + }, []string{metrics.LabelSourceCluster, metrics.LabelTargetCluster}), } } diff --git a/pkg/clustermesh/operator/remote_cluster_test.go b/pkg/clustermesh/operator/remote_cluster_test.go index c990d083debb1..669da52a8ea61 100644 --- a/pkg/clustermesh/operator/remote_cluster_test.go +++ b/pkg/clustermesh/operator/remote_cluster_test.go @@ -87,17 +87,17 @@ func TestRemoteClusterStatus(t *testing.T) { require.NoError(t, client.DeletePrefix(context.Background(), kvstore.BaseKeyPrefix)) }) - metrics := NewMetrics() logger := hivetest.Logger(t) + metrics := NewMetrics() cm := clusterMesh{ - logger: logger, - storeFactory: st, - globalServices: common.NewGlobalServiceCache(logger, metrics.TotalGlobalServices.WithLabelValues("foo")), - globalServiceExports: NewGlobalServiceExportCache( - metrics.TotalGlobalServiceExports.WithLabelValues("foo"), - ), - cfg: ClusterMeshConfig{ClusterMeshEnableEndpointSync: tt.clusterMeshEnableEndpointSync}, - cfgMCSAPI: MCSAPIConfig{ClusterMeshEnableMCSAPI: tt.clusterMeshEnableMCSAPI}, + logger: logger, + clusterInfo: types.ClusterInfo{ID: 1, Name: "cluster1", MaxConnectedClusters: 255}, + metrics: metrics, + storeFactory: st, + globalServices: common.NewGlobalServiceCache(logger), + globalServiceExports: NewGlobalServiceExportCache(), + cfg: ClusterMeshConfig{ClusterMeshEnableEndpointSync: tt.clusterMeshEnableEndpointSync}, + cfgMCSAPI: MCSAPIConfig{ClusterMeshEnableMCSAPI: tt.clusterMeshEnableMCSAPI}, } // Populate the kvstore with the appropriate KV pairs @@ -186,15 +186,15 @@ func TestRemoteClusterHooks(t *testing.T) { wg.Wait() }) logger := hivetest.Logger(t) - st := store.NewFactory(logger, store.MetricsProvider()) metrics := NewMetrics() + st := store.NewFactory(logger, store.MetricsProvider()) cm := clusterMesh{ - logger: logger, - storeFactory: st, - globalServices: common.NewGlobalServiceCache(logger, metrics.TotalGlobalServices.WithLabelValues("foo")), - globalServiceExports: NewGlobalServiceExportCache( - metrics.TotalGlobalServiceExports.WithLabelValues("foo"), - ), + logger: logger, + clusterInfo: types.ClusterInfo{ID: 1, Name: "cluster1", MaxConnectedClusters: 255}, + metrics: metrics, + storeFactory: st, + globalServices: common.NewGlobalServiceCache(logger), + globalServiceExports: NewGlobalServiceExportCache(), } clusterAddCalledCount := atomic.Uint32{} diff --git a/pkg/clustermesh/operator/service_exports.go b/pkg/clustermesh/operator/service_exports.go index fc11e8efd09e4..837f082fdbb20 100644 --- 
a/pkg/clustermesh/operator/service_exports.go +++ b/pkg/clustermesh/operator/service_exports.go @@ -12,7 +12,6 @@ import ( mcsapitypes "github.com/cilium/cilium/pkg/clustermesh/mcsapi/types" "github.com/cilium/cilium/pkg/kvstore/store" "github.com/cilium/cilium/pkg/lock" - "github.com/cilium/cilium/pkg/metrics/metric" ) type ( @@ -28,14 +27,11 @@ type GlobalServiceExportCache struct { // size is used to manage a counter of globalServiceExport // as uint instead of the float of metric.Gauge as float are not reliable to count size uint64 - // metricTotalGlobalServiceExports is the gauge metric for total of global service exports - metricTotalGlobalServiceExports metric.Gauge } -func NewGlobalServiceExportCache(metricTotalGlobalServiceExports metric.Gauge) *GlobalServiceExportCache { +func NewGlobalServiceExportCache() *GlobalServiceExportCache { return &GlobalServiceExportCache{ - cache: ServiceExportsByNamespace{}, - metricTotalGlobalServiceExports: metricTotalGlobalServiceExports, + cache: ServiceExportsByNamespace{}, } } @@ -79,7 +75,6 @@ func (c *GlobalServiceExportCache) OnUpdate(svcExport *mcsapitypes.MCSAPIService svcExportsByCluster = ServiceExportsByCluster{} svcExportsByName[svcExport.Name] = svcExportsByCluster c.size += 1 - c.metricTotalGlobalServiceExports.Set(float64(c.size)) } svcExportsByCluster[svcExport.Cluster] = svcExport @@ -109,7 +104,6 @@ func (c *GlobalServiceExportCache) OnDelete(svcExport *mcsapitypes.MCSAPIService return true } c.size -= 1 - c.metricTotalGlobalServiceExports.Set(float64(c.size)) delete(svcExportsByName, svcExport.Name) if len(c.cache[svcExport.Namespace]) != 0 { diff --git a/pkg/clustermesh/operator/service_exports_test.go b/pkg/clustermesh/operator/service_exports_test.go index 6b9ae56cc7315..0a74f406399bc 100644 --- a/pkg/clustermesh/operator/service_exports_test.go +++ b/pkg/clustermesh/operator/service_exports_test.go @@ -13,10 +13,7 @@ import ( ) func TestGlobalServiceExportCache(t *testing.T) { - metrics := NewMetrics() - globalServiceExports := NewGlobalServiceExportCache( - metrics.TotalGlobalServiceExports.WithLabelValues("foo"), - ) + globalServiceExports := NewGlobalServiceExportCache() globalServiceExports.OnUpdate(&mcsapitypes.MCSAPIServiceSpec{ Cluster: "cluster1", diff --git a/pkg/clustermesh/remote_cluster_test.go b/pkg/clustermesh/remote_cluster_test.go index 5945ea01b2b9d..a4dd02132c151 100644 --- a/pkg/clustermesh/remote_cluster_test.go +++ b/pkg/clustermesh/remote_cluster_test.go @@ -24,7 +24,6 @@ import ( "github.com/cilium/cilium/pkg/ipcache" "github.com/cilium/cilium/pkg/kvstore" "github.com/cilium/cilium/pkg/kvstore/store" - "github.com/cilium/cilium/pkg/metrics" nodeTypes "github.com/cilium/cilium/pkg/node/types" "github.com/cilium/cilium/pkg/source" "github.com/cilium/cilium/pkg/testutils" @@ -166,8 +165,8 @@ func TestRemoteClusterRun(t *testing.T) { FeatureMetrics: NewClusterMeshMetricsNoop(), Logger: logger, }, - globalServices: common.NewGlobalServiceCache(logger, metrics.NoOpGauge), FeatureMetrics: NewClusterMeshMetricsNoop(), + globalServices: common.NewGlobalServiceCache(logger), } rc := cm.NewRemoteCluster("foo", nil).(*remoteCluster) ready := make(chan error) @@ -297,7 +296,7 @@ func TestRemoteClusterClusterIDChange(t *testing.T) { Logger: logger, }, FeatureMetrics: NewClusterMeshMetricsNoop(), - globalServices: common.NewGlobalServiceCache(logger, metrics.NoOpGauge), + globalServices: common.NewGlobalServiceCache(logger), } rc := cm.NewRemoteCluster("foo", nil).(*remoteCluster) diff --git 
a/pkg/clustermesh/testdata/clusterservice.txtar b/pkg/clustermesh/testdata/clusterservice.txtar index d2a17c6beb2c1..cb10e19c45228 100644 --- a/pkg/clustermesh/testdata/clusterservice.txtar +++ b/pkg/clustermesh/testdata/clusterservice.txtar @@ -33,6 +33,17 @@ kvstore/delete cilium/state/services/v1/cluster2/test/echo db/cmp backends backends-3.table db/cmp frontends frontends-with-clusterservice-3.table +# Marking the cluster service as not shared causes the remote backends to be removed +cp clusterservice3.json clusterservice3_v2.json +sed '"shared": true' '"shared": false' clusterservice3_v2.json +kvstore/update cilium/state/services/v1/cluster3/test/echo clusterservice3_v2.json +db/cmp frontends frontends.table + +# Switch back for next test +kvstore/update cilium/state/services/v1/cluster3/test/echo clusterservice3.json +db/cmp backends backends-3.table +db/cmp frontends frontends-with-clusterservice-3.table + # Removing the last cluster service gets us back to starting point kvstore/delete cilium/state/services/v1/cluster3/test/echo db/cmp frontends frontends.table diff --git a/pkg/counter/range.go b/pkg/counter/range.go new file mode 100644 index 0000000000000..42a5f3a5d2bc4 --- /dev/null +++ b/pkg/counter/range.go @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package counter + +import ( + "github.com/cilium/cilium/pkg/time" +) + +// RangeCount represents a monotonically increasing count along with the first and last time it was +// incremented. +type RangeCount struct { + Count uint64 + First time.Time + Last time.Time +} + +// RangeCounter is a simple counter that tracks a count and a time interval. +type RangeCounter struct { + count RangeCount +} + +// NewRangeCounter creates a new RangeCounter. +func NewRangeCounter() *RangeCounter { + return &RangeCounter{} +} + +// Increment increments the counter and updates the time range. +func (c *RangeCounter) Increment(now time.Time) { + if c.count.Count == 0 || c.count.First.After(now) { + c.count.First = now + } + if c.count.Count == 0 || c.count.Last.Before(now) { + c.count.Last = now + } + c.count.Count++ +} + +// Peek returns the current count. +func (c *RangeCounter) Peek() RangeCount { + return c.count +} + +// Clear clears the counter and returns the existing count. +func (c *RangeCounter) Clear() RangeCount { + count := c.count + c.count = RangeCount{} + return count +} + +// IntervalRangeCounter is a specialized RangeCounter that provides a IsElapsed() method to check if +// the time interval has elapsed since the first increment. +type IntervalRangeCounter struct { + RangeCounter + interval time.Duration +} + +// NewIntervalRangeCounter creates a new IntervalRangeCounter with the specified interval. +func NewIntervalRangeCounter(interval time.Duration) *IntervalRangeCounter { + return &IntervalRangeCounter{ + RangeCounter: RangeCounter{}, + interval: interval, + } +} + +// IsElapsed checks if the duration since the first increment until now exceeds the configured +// interval. It always returns false when the counter is empty as a "start time" is required as base +// to compute the interval. 
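
A possible usage sketch for the new counter package, assuming the API added in this patch (NewIntervalRangeCounter, Increment, IsElapsed, Clear) and that Cilium's pkg/time aliases the standard library types: count events and flush a summary once the interval has elapsed since the first event in the window.

package main

import (
	"fmt"
	"time"

	"github.com/cilium/cilium/pkg/counter"
)

func main() {
	// Flush a summary once the first event in the current window is 5s old.
	c := counter.NewIntervalRangeCounter(5 * time.Second)

	onEvent := func(now time.Time) {
		c.Increment(now)
		if c.IsElapsed(now) {
			summary := c.Clear() // resets the window
			fmt.Printf("%d events between %s and %s\n",
				summary.Count, summary.First.Format(time.RFC3339), summary.Last.Format(time.RFC3339))
		}
	}

	start := time.Now()
	onEvent(start)
	onEvent(start.Add(2 * time.Second))
	onEvent(start.Add(6 * time.Second)) // 6s >= 5s since the first event: prints "3 events ..."
}
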
+func (c *IntervalRangeCounter) IsElapsed(now time.Time) bool { + if c.count.Count == 0 { + return false + } + return now.Sub(c.count.First) >= c.interval +} diff --git a/pkg/counter/range_test.go b/pkg/counter/range_test.go new file mode 100644 index 0000000000000..9957b7fd9ca64 --- /dev/null +++ b/pkg/counter/range_test.go @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package counter + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestRangeCounter(t *testing.T) { + now := time.Now() + earlier := now.Add(-1 * time.Second) + later := now.Add(1 * time.Second) + + counter := NewRangeCounter() + require.NotNil(t, counter, "Expected counter to be initialized") + + counter.Increment(now) + require.Equal(t, uint64(1), counter.count.Count, "Expected count to be 1") + require.Equal(t, now, counter.count.First, "Expected first time to be now") + require.Equal(t, now, counter.count.Last, "Expected last time to be now") + + counter.Increment(later) + require.Equal(t, uint64(2), counter.count.Count, "Expected count to be 2") + require.Equal(t, now, counter.count.First, "Expected first time to be now") + require.Equal(t, later, counter.count.Last, "Expected last time to be now + 1s") + + count := counter.Clear() + require.Equal(t, uint64(2), count.Count, "Expected cleared count to be 2") + require.Equal(t, now, count.First, "Expected cleared first time to be now") + require.Equal(t, now.Add(1*time.Second), count.Last, "Expected cleared last time to be now + 1s") + + count = counter.Clear() + require.Equal(t, uint64(0), count.Count, "Expected second cleared count to be 0") + require.Equal(t, time.Time{}, count.First, "Expected second cleared first time to be zero") + require.Equal(t, time.Time{}, count.Last, "Expected second cleared last time to be zero") + + counter.Increment(now) + counter.Increment(earlier) + counter.Increment(later) + require.Equal(t, uint64(3), counter.count.Count, "Expected count to be 3 after increments") + require.Equal(t, earlier, counter.count.First, "Expected first time to be earlier") + require.Equal(t, later, counter.count.Last, "Expected last time to be later") +} + +func TestIntervalRangeCounter(t *testing.T) { + interval := 2 * time.Second + now := time.Now() + + counter := NewIntervalRangeCounter(interval) + require.NotNil(t, counter, "Expected counter to be initialized") + + require.False(t, counter.IsElapsed(now.Add(-2*interval)), "Expected IsElapsed to return false when count is not incremented") + require.False(t, counter.IsElapsed(now), "Expected IsElapsed to return false when count is not incremented") + require.False(t, counter.IsElapsed(now.Add(2*interval)), "Expected IsElapsed to return false when count is not incremented") + + counter.Increment(now) + require.False(t, counter.IsElapsed(now.Add(-2*interval)), "Expected IsElapsed to return false when first time is in the future") + require.False(t, counter.IsElapsed(now), "Expected IsElapsed to return false when first time is now") + require.True(t, counter.IsElapsed(now.Add(2*interval)), "Expected IsElapsed to return true when first time is past the interval") +} diff --git a/pkg/datapath/bpf/probes_bpfeb.o b/pkg/datapath/bpf/probes_bpfeb.o index 50d9bb6738831..a299a04f17952 100644 Binary files a/pkg/datapath/bpf/probes_bpfeb.o and b/pkg/datapath/bpf/probes_bpfeb.o differ diff --git a/pkg/datapath/bpf/probes_bpfel.o b/pkg/datapath/bpf/probes_bpfel.o index 557b6377c934b..53c3bfb6f7dd2 100644 Binary files 
a/pkg/datapath/bpf/probes_bpfel.o and b/pkg/datapath/bpf/probes_bpfel.o differ diff --git a/pkg/datapath/bpf/sockterm_bpfeb.o b/pkg/datapath/bpf/sockterm_bpfeb.o index a0d4572befef4..8c73e593ea756 100644 Binary files a/pkg/datapath/bpf/sockterm_bpfeb.o and b/pkg/datapath/bpf/sockterm_bpfeb.o differ diff --git a/pkg/datapath/bpf/sockterm_bpfel.o b/pkg/datapath/bpf/sockterm_bpfel.o index ce79edf5eb716..50cbdc31c99e7 100644 Binary files a/pkg/datapath/bpf/sockterm_bpfel.o and b/pkg/datapath/bpf/sockterm_bpfel.o differ diff --git a/pkg/datapath/loader/config.go b/pkg/datapath/config/config.go similarity index 84% rename from pkg/datapath/loader/config.go rename to pkg/datapath/config/config.go index 4e651412bfefa..1448cebabd5f8 100644 --- a/pkg/datapath/loader/config.go +++ b/pkg/datapath/config/config.go @@ -1,17 +1,16 @@ // SPDX-License-Identifier: Apache-2.0 // Copyright Authors of Cilium -package loader +package config import ( - "github.com/cilium/cilium/pkg/datapath/config" "github.com/cilium/cilium/pkg/datapath/linux/probes" datapath "github.com/cilium/cilium/pkg/datapath/types" "github.com/cilium/cilium/pkg/option" ) -func nodeConfig(lnc *datapath.LocalNodeConfiguration) config.Node { - node := *config.NewNode() +func NodeConfig(lnc *datapath.LocalNodeConfiguration) Node { + node := *NewNode() if lnc.ServiceLoopbackIPv4 != nil { node.ServiceLoopbackIPv4 = [4]byte(lnc.ServiceLoopbackIPv4.To4()) @@ -34,5 +33,7 @@ func nodeConfig(lnc *datapath.LocalNodeConfiguration) config.Node { node.SupportsFibLookupSkipNeigh = probes.HaveFibLookupSkipNeigh() == nil + node.TracingIPOptionType = uint8(option.Config.IPTracingOptionType) + return node } diff --git a/pkg/datapath/config/gen.go b/pkg/datapath/config/gen.go index f28d43d229e0f..86f42e3c2f345 100644 --- a/pkg/datapath/config/gen.go +++ b/pkg/datapath/config/gen.go @@ -13,3 +13,4 @@ package config //go:generate go run github.com/cilium/cilium/tools/dpgen -path ../../../bpf/bpf_overlay.o -embed Node -kind object -name BPFOverlay -out overlay_config.go //go:generate go run github.com/cilium/cilium/tools/dpgen -path ../../../bpf/bpf_network.o -embed Node -kind object -name BPFNetwork -out network_config.go //go:generate go run github.com/cilium/cilium/tools/dpgen -path ../../../bpf/bpf_wireguard.o -embed Node -kind object -name BPFWireguard -out wireguard_config.go +//go:generate go run github.com/cilium/cilium/tools/dpgen -path ../../../bpf/bpf_sock.o -embed Node -kind object -name BPFSock -out sock_config.go diff --git a/pkg/datapath/config/host_config.go b/pkg/datapath/config/host_config.go index fe1ae07c87fa9..c85698b9874e5 100644 --- a/pkg/datapath/config/host_config.go +++ b/pkg/datapath/config/host_config.go @@ -14,6 +14,8 @@ type BPFHost struct { DeviceMTU uint16 `config:"device_mtu"` // Pass traffic with extended IP protocols. EnableExtendedIPProtocols bool `config:"enable_extended_ip_protocols"` + // Enable routes when service has 0 endpoints. + EnableNoServiceEndpointsRoutable bool `config:"enable_no_service_endpoints_routable"` // Masquerade traffic to remote nodes. EnableRemoteNodeMasquerade bool `config:"enable_remote_node_masquerade"` // Length of the Ethernet header on this device. 
May be set to zero on L2-less @@ -38,7 +40,7 @@ type BPFHost struct { } func NewBPFHost(node Node) *BPFHost { - return &BPFHost{0x5dc, false, false, 0xe, 0x0, 0x0, [8]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + return &BPFHost{0x5dc, false, false, false, 0xe, 0x0, 0x0, [8]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, [4]byte{0x0, 0x0, 0x0, 0x0}, [16]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, false, 0x0, node} diff --git a/pkg/datapath/config/lxc_config.go b/pkg/datapath/config/lxc_config.go index cd13d705bf63d..d858be27a7581 100644 --- a/pkg/datapath/config/lxc_config.go +++ b/pkg/datapath/config/lxc_config.go @@ -14,6 +14,8 @@ type BPFLXC struct { DeviceMTU uint16 `config:"device_mtu"` // Pass traffic with extended IP protocols. EnableExtendedIPProtocols bool `config:"enable_extended_ip_protocols"` + // Enable routes when service has 0 endpoints. + EnableNoServiceEndpointsRoutable bool `config:"enable_no_service_endpoints_routable"` // Masquerade traffic to remote nodes. EnableRemoteNodeMasquerade bool `config:"enable_remote_node_masquerade"` // The endpoint's security ID. @@ -45,7 +47,7 @@ type BPFLXC struct { } func NewBPFLXC(node Node) *BPFLXC { - return &BPFLXC{0x5dc, false, false, 0x0, [4]byte{0x0, 0x0, 0x0, 0x0}, + return &BPFLXC{0x5dc, false, false, false, 0x0, [4]byte{0x0, 0x0, 0x0, 0x0}, [16]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, 0x0, 0x0, 0x0, [8]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, [4]byte{0x0, 0x0, 0x0, 0x0}, diff --git a/pkg/datapath/config/node_config.go b/pkg/datapath/config/node_config.go index fb4aeb31baf46..88d3341da4db6 100644 --- a/pkg/datapath/config/node_config.go +++ b/pkg/datapath/config/node_config.go @@ -23,6 +23,8 @@ type Node struct { TracePayloadLen uint32 `config:"trace_payload_len"` // Length of payload to capture when tracing overlay packets. TracePayloadLenOverlay uint32 `config:"trace_payload_len_overlay"` + // The IP option type to use for packet tracing. + TracingIPOptionType uint8 `config:"tracing_ip_option_type"` } func NewNode() *Node { @@ -30,5 +32,5 @@ func NewNode() *Node { [16]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, [4]byte{0x0, 0x0, 0x0, 0x0}, [16]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, - false, 0x0, 0x0} + false, 0x0, 0x0, 0x0} } diff --git a/pkg/datapath/config/overlay_config.go b/pkg/datapath/config/overlay_config.go index ff631279585b9..55a5ec9728f8e 100644 --- a/pkg/datapath/config/overlay_config.go +++ b/pkg/datapath/config/overlay_config.go @@ -14,6 +14,8 @@ type BPFOverlay struct { DeviceMTU uint16 `config:"device_mtu"` // Pass traffic with extended IP protocols. EnableExtendedIPProtocols bool `config:"enable_extended_ip_protocols"` + // Enable routes when service has 0 endpoints. + EnableNoServiceEndpointsRoutable bool `config:"enable_no_service_endpoints_routable"` // Masquerade traffic to remote nodes. EnableRemoteNodeMasquerade bool `config:"enable_remote_node_masquerade"` // Ifindex of the interface the bpf program is attached to. 
@@ -31,7 +33,7 @@ type BPFOverlay struct { } func NewBPFOverlay(node Node) *BPFOverlay { - return &BPFOverlay{0x5dc, false, false, 0x0, [8]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + return &BPFOverlay{0x5dc, false, false, false, 0x0, [8]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, [4]byte{0x0, 0x0, 0x0, 0x0}, [16]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, false, node} diff --git a/pkg/datapath/config/sock_config.go b/pkg/datapath/config/sock_config.go new file mode 100644 index 0000000000000..70d168f849560 --- /dev/null +++ b/pkg/datapath/config/sock_config.go @@ -0,0 +1,22 @@ +// Code generated by dpgen. DO NOT EDIT. + +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package config + +// BPFSock is a configuration struct for a Cilium datapath object. Warning: do +// not instantiate directly! Always use [NewBPFSock] to ensure the default +// values configured in the ELF are honored. +type BPFSock struct { + // Pass traffic with extended IP protocols. + EnableExtendedIPProtocols bool `config:"enable_extended_ip_protocols"` + // Enable routes when service has 0 endpoints. + EnableNoServiceEndpointsRoutable bool `config:"enable_no_service_endpoints_routable"` + + Node +} + +func NewBPFSock(node Node) *BPFSock { + return &BPFSock{false, false, node} +} diff --git a/pkg/datapath/config/wireguard_config.go b/pkg/datapath/config/wireguard_config.go index 122bb50def7a3..c8b1200991a41 100644 --- a/pkg/datapath/config/wireguard_config.go +++ b/pkg/datapath/config/wireguard_config.go @@ -14,6 +14,8 @@ type BPFWireguard struct { DeviceMTU uint16 `config:"device_mtu"` // Pass traffic with extended IP protocols. EnableExtendedIPProtocols bool `config:"enable_extended_ip_protocols"` + // Enable routes when service has 0 endpoints. + EnableNoServiceEndpointsRoutable bool `config:"enable_no_service_endpoints_routable"` // Masquerade traffic to remote nodes. EnableRemoteNodeMasquerade bool `config:"enable_remote_node_masquerade"` // Ifindex of the interface the bpf program is attached to. @@ -31,7 +33,7 @@ type BPFWireguard struct { } func NewBPFWireguard(node Node) *BPFWireguard { - return &BPFWireguard{0x5dc, false, false, 0x0, [8]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + return &BPFWireguard{0x5dc, false, false, false, 0x0, [8]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, [4]byte{0x0, 0x0, 0x0, 0x0}, [16]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, false, node} diff --git a/pkg/datapath/config/xdp_config.go b/pkg/datapath/config/xdp_config.go index 065a325123920..73e18901d0184 100644 --- a/pkg/datapath/config/xdp_config.go +++ b/pkg/datapath/config/xdp_config.go @@ -14,6 +14,8 @@ type BPFXDP struct { DeviceMTU uint16 `config:"device_mtu"` // Pass traffic with extended IP protocols. EnableExtendedIPProtocols bool `config:"enable_extended_ip_protocols"` + // Enable routes when service has 0 endpoints. + EnableNoServiceEndpointsRoutable bool `config:"enable_no_service_endpoints_routable"` // Masquerade traffic to remote nodes. EnableRemoteNodeMasquerade bool `config:"enable_remote_node_masquerade"` // Ifindex of the interface the bpf program is attached to. 
@@ -31,7 +33,7 @@ type BPFXDP struct { } func NewBPFXDP(node Node) *BPFXDP { - return &BPFXDP{0x5dc, false, false, 0x0, [8]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + return &BPFXDP{0x5dc, false, false, false, 0x0, [8]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, [4]byte{0x0, 0x0, 0x0, 0x0}, [16]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, false, node} diff --git a/pkg/datapath/fake/types/ipsec.go b/pkg/datapath/fake/types/ipsec.go index d491afdd01752..f302897bdaf28 100644 --- a/pkg/datapath/fake/types/ipsec.go +++ b/pkg/datapath/fake/types/ipsec.go @@ -61,10 +61,6 @@ func (c IPsecConfig) Enabled() bool { return c.EnableIPsec } -func (c IPsecConfig) EncryptedOverlayEnabled() bool { - return c.EncryptedOverlay -} - func (c IPsecConfig) UseCiliumInternalIP() bool { return c.UseCiliumInternalIPForIPsec } diff --git a/pkg/datapath/fake/types/iptables_manager.go b/pkg/datapath/fake/types/iptables_manager.go index 1c054a5f27e46..7227a80934288 100644 --- a/pkg/datapath/fake/types/iptables_manager.go +++ b/pkg/datapath/fake/types/iptables_manager.go @@ -35,3 +35,9 @@ func (m *FakeIptablesManager) InstallNoTrackRules(ip netip.Addr, port uint16) { func (m *FakeIptablesManager) RemoveNoTrackRules(ip netip.Addr, port uint16) { } + +func (m *FakeIptablesManager) AddNoTrackHostPorts(namespace, name string, ports []string) { +} + +func (m *FakeIptablesManager) RemoveNoTrackHostPorts(namespace, name string) { +} diff --git a/pkg/datapath/iptables/custom_chain.go b/pkg/datapath/iptables/custom_chain.go index 11b288262928a..5100d982c9050 100644 --- a/pkg/datapath/iptables/custom_chain.go +++ b/pkg/datapath/iptables/custom_chain.go @@ -130,7 +130,7 @@ func (c *customChain) doAdd(prog runnable) error { return nil } -func (c *customChain) add(ipv4, ipv6 bool) error { +func (c *customChain) add(ipv4, ipv6 bool, ip4tables, ip6tables iptablesInterface) error { if ipv4 { if err := c.doAdd(ip4tables); err != nil { return err @@ -162,7 +162,7 @@ func (c *customChain) doRename(prog runnable, newName string) error { return nil } -func (c *customChain) rename(ipv4, ipv6 bool, name string) error { +func (c *customChain) rename(ipv4, ipv6 bool, name string, ip4tables, ip6tables iptablesInterface) error { if ipv4 { if err := c.doRename(ip4tables, name); err != nil { return err @@ -201,7 +201,7 @@ func (c *customChain) doRemove(prog iptablesInterface) error { return nil } -func (c *customChain) remove(ipv4, ipv6 bool) error { +func (c *customChain) remove(ipv4, ipv6 bool, ip4tables, ip6tables iptablesInterface) error { if ipv4 { if err := c.doRemove(ip4tables); err != nil { return err @@ -233,7 +233,7 @@ func (c *customChain) doInstallFeeder(prog iptablesInterface, prepend bool) erro return nil } -func (c *customChain) installFeeder(ipv4, ipv6, prepend bool) error { +func (c *customChain) installFeeder(ipv4, ipv6, prepend bool, ip4tables, ip6tables iptablesInterface) error { if ipv4 { if err := c.doInstallFeeder(ip4tables, prepend); err != nil { return err diff --git a/pkg/datapath/iptables/iptables.go b/pkg/datapath/iptables/iptables.go index 1c9256dac5253..37a2cc7426444 100644 --- a/pkg/datapath/iptables/iptables.go +++ b/pkg/datapath/iptables/iptables.go @@ -12,6 +12,7 @@ import ( "net/netip" "os" "regexp" + "slices" "strconv" "strings" @@ -27,6 +28,7 @@ import ( "github.com/cilium/cilium/pkg/byteorder" "github.com/cilium/cilium/pkg/cidr" "github.com/cilium/cilium/pkg/command/exec" + "github.com/cilium/cilium/pkg/container/set" 
"github.com/cilium/cilium/pkg/datapath/iptables/ipset" "github.com/cilium/cilium/pkg/datapath/linux/linux_defaults" "github.com/cilium/cilium/pkg/datapath/linux/route" @@ -78,6 +80,10 @@ var ( }, } } + + noTrackSupportedProtos = []lb.L4Type{ + lb.TCP, lb.UDP, + } ) const ( @@ -119,12 +125,6 @@ func (ipt *ipt) initArgs(ctx context.Context, waitSeconds int) { } } -// package name is iptables so we use ip4tables internally for "iptables" -var ( - ip4tables = &ipt{prog: "iptables", ipset: ipset.CiliumNodeIPSetV4} - ip6tables = &ipt{prog: "ip6tables", ipset: ipset.CiliumNodeIPSetV6} -) - func (ipt *ipt) getProg() string { return ipt.prog } @@ -254,6 +254,37 @@ func (m *Manager) removeCiliumRules(table string, prog runnable, match string) e return nil } +type podAndNameSpace struct { + podName, namespace string +} + +// noTrackHostPortsByPod stores the ports passed in with the annotation /no-track-host-ports +// indexed by pod name+namespace +type noTrackHostPortsByPod map[podAndNameSpace]set.Set[lb.L4Addr] + +func (ports noTrackHostPortsByPod) flatten() set.Set[lb.L4Addr] { + result := set.Set[lb.L4Addr]{} + + for _, p := range ports { + result.Merge(p) + } + + return result +} + +func (ports noTrackHostPortsByPod) exclude(key podAndNameSpace) noTrackHostPortsByPod { + result := make(noTrackHostPortsByPod) + + for k, p := range ports { + if key == k { + continue + } + result[k] = p + } + + return result +} + // Manager manages the iptables-related configuration for Cilium. type Manager struct { logger *slog.Logger @@ -261,6 +292,8 @@ type Manager struct { // GetProxyPort() methods. lock lock.Mutex + ip4tables, ip6tables iptablesInterface + sysctl sysctl.Sysctl cfg Config @@ -280,13 +313,15 @@ type Manager struct { } type reconcilerParams struct { - clock clock.WithTicker - localNodeStore *node.LocalNodeStore - db *statedb.DB - devices statedb.Table[*tables.Device] - proxies chan reconciliationRequest[proxyInfo] - addNoTrackPod chan reconciliationRequest[noTrackPodInfo] - delNoTrackPod chan reconciliationRequest[noTrackPodInfo] + clock clock.WithTicker + localNodeStore *node.LocalNodeStore + db *statedb.DB + devices statedb.Table[*tables.Device] + proxies chan reconciliationRequest[proxyInfo] + addNoTrackPod chan reconciliationRequest[noTrackPodInfo] + delNoTrackPod chan reconciliationRequest[noTrackPodInfo] + addNoTrackHostPorts chan reconciliationRequest[noTrackHostPortsPodInfo] + delNoTrackHostPorts chan reconciliationRequest[podAndNameSpace] } type params struct { @@ -317,18 +352,26 @@ func newIptablesManager(p params) datapath.IptablesManager { sharedCfg: p.SharedCfg, argsInit: lock.NewStoppableWaitGroup(), reconcilerParams: reconcilerParams{ - clock: clock.RealClock{}, - localNodeStore: p.LocalNodeStore, - db: p.DB, - devices: p.Devices, - proxies: make(chan reconciliationRequest[proxyInfo]), - addNoTrackPod: make(chan reconciliationRequest[noTrackPodInfo]), - delNoTrackPod: make(chan reconciliationRequest[noTrackPodInfo]), + clock: clock.RealClock{}, + localNodeStore: p.LocalNodeStore, + db: p.DB, + devices: p.Devices, + proxies: make(chan reconciliationRequest[proxyInfo]), + addNoTrackPod: make(chan reconciliationRequest[noTrackPodInfo]), + delNoTrackPod: make(chan reconciliationRequest[noTrackPodInfo]), + addNoTrackHostPorts: make(chan reconciliationRequest[noTrackHostPortsPodInfo]), + delNoTrackHostPorts: make(chan reconciliationRequest[podAndNameSpace]), }, haveIp6tables: true, cniConfigManager: p.CNIConfigManager, } + ip4tables := &ipt{prog: "iptables", ipset: 
ipset.CiliumNodeIPSetV4} + ip6tables := &ipt{prog: "ip6tables", ipset: ipset.CiliumNodeIPSetV6} + + iptMgr.ip4tables = ip4tables + iptMgr.ip6tables = ip6tables + // init iptables/ip6tables wait arguments before using them in the reconciler or in the manager (e.g: GetProxyPorts) initDone := iptMgr.argsInit.Add() p.Lifecycle.Append(cell.Hook{ @@ -365,6 +408,8 @@ func newIptablesManager(p params) datapath.IptablesManager { iptMgr.doInstallProxyRules, iptMgr.installNoTrackRules, iptMgr.removeNoTrackRules, + iptMgr.setNoTrackHostPorts, + iptMgr.removeNoTrackHostPorts, ) }), ) @@ -394,7 +439,7 @@ func (m *Manager) Start(ctx cell.HookContext) error { } for _, table := range []string{"nat", "mangle", "raw", "filter"} { - if err := ip4tables.runProg([]string{"-t", table, "-L", "-n"}); err != nil { + if err := m.ip4tables.runProg([]string{"-t", table, "-L", "-n"}); err != nil { if m.sharedCfg.InstallIptRules { m.logger.Warn("iptables table is not available on this system", logfields.Error, err, @@ -405,7 +450,7 @@ func (m *Manager) Start(ctx cell.HookContext) error { } for _, table := range []string{"mangle", "raw", "filter"} { - if err := ip6tables.runProg([]string{"-t", table, "-L", "-n"}); err != nil { + if err := m.ip6tables.runProg([]string{"-t", table, "-L", "-n"}); err != nil { if m.sharedCfg.InstallIptRules { m.logger.Debug("ip6tables table is not available on this system", logfields.Error, err, @@ -438,7 +483,7 @@ func (m *Manager) Start(ctx cell.HookContext) error { } } - if err := ip4tables.runProg([]string{"-t", "mangle", "-L", "-m", "socket", "-n"}); err != nil { + if err := m.ip4tables.runProg([]string{"-t", "mangle", "-L", "-m", "socket", "-n"}); err != nil { if m.sharedCfg.InstallIptRules { m.logger.Warn("iptables match socket is not available (try installing xt_socket kernel module)", logfields.Error, err) } @@ -474,6 +519,8 @@ func (m *Manager) Stop(ctx cell.HookContext) error { close(m.reconcilerParams.proxies) close(m.reconcilerParams.addNoTrackPod) close(m.reconcilerParams.delNoTrackPod) + close(m.reconcilerParams.addNoTrackHostPorts) + close(m.reconcilerParams.delNoTrackHostPorts) return nil } @@ -505,12 +552,12 @@ func (m *Manager) removeRules(prefix string) error { // Set of tables that have had iptables rules in any Cilium version tables := []string{"nat", "mangle", "raw", "filter"} for _, t := range tables { - if err := m.removeCiliumRules(t, ip4tables, prefix+"CILIUM_"); err != nil { + if err := m.removeCiliumRules(t, m.ip4tables, prefix+"CILIUM_"); err != nil { return err } if m.haveIp6tables { - if err := m.removeCiliumRules(t, ip6tables, prefix+"CILIUM_"); err != nil { + if err := m.removeCiliumRules(t, m.ip6tables, prefix+"CILIUM_"); err != nil { return err } } @@ -518,7 +565,7 @@ func (m *Manager) removeRules(prefix string) error { for _, c := range ciliumChains { c.name = prefix + c.name - if err := c.remove(true, m.haveIp6tables); err != nil { + if err := c.remove(true, m.haveIp6tables, m.ip4tables, m.ip6tables); err != nil { return err } } @@ -529,7 +576,7 @@ func (m *Manager) removeRules(prefix string) error { // renameChains renames iptables chains installed by Cilium. 
func (m *Manager) renameChains(prefix string) error { for _, c := range ciliumChains { - if err := c.rename(true, m.haveIp6tables, prefix+c.name); err != nil { + if err := c.rename(true, m.haveIp6tables, prefix+c.name, m.ip4tables, m.ip6tables); err != nil { return err } } @@ -551,14 +598,14 @@ func (m *Manager) inboundProxyRedirectRule(cmd string) []string { // excluding traffic for the loopback device. toProxyMark := fmt.Sprintf("%#08x", linux_defaults.MagicMarkIsToProxy) matchFromIPSecEncrypt := fmt.Sprintf("%#08x/%#08x", linux_defaults.RouteMarkEncrypt, linux_defaults.RouteMarkMask) - matchProxyToWorld := fmt.Sprintf("%#08x/%#08x", linux_defaults.MarkProxyToWorld, linux_defaults.RouteMarkMask) + matchSkipTProxy := fmt.Sprintf("%#08x/%#08x", linux_defaults.MarkSkipTProxy, linux_defaults.RouteMarkMask) return []string{ "-t", "mangle", cmd, ciliumPreMangleChain, "-m", "socket", "--transparent", "!", "-o", "lo", "-m", "mark", "!", "--mark", matchFromIPSecEncrypt, - "-m", "mark", "!", "--mark", matchProxyToWorld, + "-m", "mark", "!", "--mark", matchSkipTProxy, "-m", "comment", "--comment", "cilium: any->pod redirect proxied traffic to host proxy", "-j", "MARK", "--set-mark", toProxyMark} @@ -650,7 +697,7 @@ func (m *Manager) installStaticProxyRules() error { if m.sharedCfg.EnableIPv4 { // No conntrack for traffic to proxy - if err := ip4tables.runProg([]string{ + if err := m.ip4tables.runProg([]string{ "-t", "raw", "-A", ciliumPreRawChain, "-m", "mark", "--mark", matchToProxy, @@ -661,7 +708,7 @@ func (m *Manager) installStaticProxyRules() error { // Explicit ACCEPT for the proxy traffic. Needed when the INPUT defaults to DROP. // Matching needs to be the same as for the NOTRACK rule above. - if err := ip4tables.runProg([]string{ + if err := m.ip4tables.runProg([]string{ "-t", "filter", "-A", ciliumInputChain, "-m", "mark", "--mark", matchToProxy, @@ -671,7 +718,7 @@ func (m *Manager) installStaticProxyRules() error { } // No conntrack for proxy return traffic that is heading to lxc+ - if err := ip4tables.runProg([]string{ + if err := m.ip4tables.runProg([]string{ "-t", "raw", "-A", ciliumOutputRawChain, "-o", "lxc+", @@ -682,7 +729,7 @@ func (m *Manager) installStaticProxyRules() error { } // No conntrack for proxy return traffic that is heading to cilium_host - if err := ip4tables.runProg([]string{ + if err := m.ip4tables.runProg([]string{ "-t", "raw", "-A", ciliumOutputRawChain, "-o", defaults.HostDevice, @@ -694,7 +741,7 @@ func (m *Manager) installStaticProxyRules() error { // No conntrack for proxy forward traffic that is heading to cilium_host if m.sharedCfg.EnableIPSec { - if err := ip4tables.runProg([]string{ + if err := m.ip4tables.runProg([]string{ "-t", "raw", "-A", ciliumOutputRawChain, "-o", defaults.HostDevice, @@ -706,7 +753,7 @@ func (m *Manager) installStaticProxyRules() error { } // No conntrack for proxy upstream traffic that is heading to lxc+ - if err := ip4tables.runProg([]string{ + if err := m.ip4tables.runProg([]string{ "-t", "raw", "-A", ciliumOutputRawChain, "-o", "lxc+", @@ -717,7 +764,7 @@ func (m *Manager) installStaticProxyRules() error { } // No conntrack for proxy upstream traffic that is heading to cilium_host - if err := ip4tables.runProg([]string{ + if err := m.ip4tables.runProg([]string{ "-t", "raw", "-A", ciliumOutputRawChain, "-o", defaults.HostDevice, @@ -729,7 +776,7 @@ func (m *Manager) installStaticProxyRules() error { // Explicit ACCEPT for the proxy return traffic. Needed when the OUTPUT defaults to DROP. 
// Matching needs to be the same as for the NOTRACK rule above. - if err := ip4tables.runProg([]string{ + if err := m.ip4tables.runProg([]string{ "-t", "filter", "-A", ciliumOutputChain, "-m", "mark", "--mark", matchFromProxy, @@ -740,7 +787,7 @@ func (m *Manager) installStaticProxyRules() error { // Explicit ACCEPT for the l7 proxy upstream traffic. Needed when the OUTPUT defaults to DROP. // TODO: See if this is really needed. We do not have an ACCEPT for normal proxy upstream traffic. - if err := ip4tables.runProg([]string{ + if err := m.ip4tables.runProg([]string{ "-t", "filter", "-A", ciliumOutputChain, "-m", "mark", "--mark", matchL7ProxyUpstream, @@ -751,7 +798,7 @@ func (m *Manager) installStaticProxyRules() error { if m.haveSocketMatch { // Direct inbound TPROXYed traffic towards the socket - if err := ip4tables.runProg(m.inboundProxyRedirectRule("-A")); err != nil { + if err := m.ip4tables.runProg(m.inboundProxyRedirectRule("-A")); err != nil { return err } } @@ -759,7 +806,7 @@ func (m *Manager) installStaticProxyRules() error { if m.sharedCfg.EnableIPv6 { // No conntrack for traffic to ingress proxy - if err := ip6tables.runProg([]string{ + if err := m.ip6tables.runProg([]string{ "-t", "raw", "-A", ciliumPreRawChain, "-m", "mark", "--mark", matchToProxy, @@ -770,7 +817,7 @@ func (m *Manager) installStaticProxyRules() error { // Explicit ACCEPT for the proxy traffic. Needed when the INPUT defaults to DROP. // Matching needs to be the same as for the NOTRACK rule above. - if err := ip6tables.runProg([]string{ + if err := m.ip6tables.runProg([]string{ "-t", "filter", "-A", ciliumInputChain, "-m", "mark", "--mark", matchToProxy, @@ -780,7 +827,7 @@ func (m *Manager) installStaticProxyRules() error { } // No conntrack for proxy return traffic that is heading to lxc+ - if err := ip6tables.runProg([]string{ + if err := m.ip6tables.runProg([]string{ "-t", "raw", "-A", ciliumOutputRawChain, "-o", "lxc+", @@ -791,7 +838,7 @@ func (m *Manager) installStaticProxyRules() error { } // No conntrack for proxy return traffic that is heading to cilium_host - if err := ip6tables.runProg([]string{ + if err := m.ip6tables.runProg([]string{ "-t", "raw", "-A", ciliumOutputRawChain, "-o", defaults.HostDevice, @@ -803,7 +850,7 @@ func (m *Manager) installStaticProxyRules() error { // No conntrack for proxy forward traffic that is heading to cilium_host if m.sharedCfg.EnableIPSec { - if err := ip6tables.runProg([]string{ + if err := m.ip6tables.runProg([]string{ "-t", "raw", "-A", ciliumOutputRawChain, "-o", defaults.HostDevice, @@ -815,7 +862,7 @@ func (m *Manager) installStaticProxyRules() error { } // No conntrack for proxy upstream traffic that is heading to lxc+ - if err := ip6tables.runProg([]string{ + if err := m.ip6tables.runProg([]string{ "-t", "raw", "-A", ciliumOutputRawChain, "-o", "lxc+", @@ -826,7 +873,7 @@ func (m *Manager) installStaticProxyRules() error { } // No conntrack for proxy upstream traffic that is heading to cilium_host - if err := ip6tables.runProg([]string{ + if err := m.ip6tables.runProg([]string{ "-t", "raw", "-A", ciliumOutputRawChain, "-o", defaults.HostDevice, @@ -838,7 +885,7 @@ func (m *Manager) installStaticProxyRules() error { // Explicit ACCEPT for the proxy return traffic. Needed when the OUTPUT defaults to DROP. // Matching needs to be the same as for the NOTRACK rule above. 
- if err := ip6tables.runProg([]string{ + if err := m.ip6tables.runProg([]string{ "-t", "filter", "-A", ciliumOutputChain, "-m", "mark", "--mark", matchFromProxy, @@ -849,7 +896,7 @@ func (m *Manager) installStaticProxyRules() error { if m.haveSocketMatch { // Direct inbound TPROXYed traffic towards the socket - if err := ip6tables.runProg(m.inboundProxyRedirectRule("-A")); err != nil { + if err := m.ip6tables.runProg(m.inboundProxyRedirectRule("-A")); err != nil { return err } } @@ -897,13 +944,13 @@ var tproxyMatch = regexp.MustCompile("CILIUM_PRE_mangle .*cilium: TPROXY") // copies old proxy rules func (m *Manager) copyProxyRules(oldChain string, match string) error { if m.sharedCfg.EnableIPv4 { - if err := m.doCopyProxyRules(ip4tables, "mangle", tproxyMatch, match, oldChain, ciliumPreMangleChain); err != nil { + if err := m.doCopyProxyRules(m.ip4tables, "mangle", tproxyMatch, match, oldChain, ciliumPreMangleChain); err != nil { return err } } if m.sharedCfg.EnableIPv6 { - if err := m.doCopyProxyRules(ip6tables, "mangle", tproxyMatch, match, oldChain, ciliumPreMangleChain); err != nil { + if err := m.doCopyProxyRules(m.ip6tables, "mangle", tproxyMatch, match, oldChain, ciliumPreMangleChain); err != nil { return err } } @@ -1112,6 +1159,30 @@ func (m *Manager) RemoveNoTrackRules(ip netip.Addr, port uint16) { <-reconciled } +func (m *Manager) AddNoTrackHostPorts(namespace, name string, ports []string) { + if !m.sharedCfg.InstallNoConntrackIptRules { + return + } + + podName := podAndNameSpace{podName: name, namespace: namespace} + + reconciled := make(chan struct{}) + m.reconcilerParams.addNoTrackHostPorts <- reconciliationRequest[noTrackHostPortsPodInfo]{noTrackHostPortsPodInfo{podName, ports}, reconciled} + <-reconciled +} + +func (m *Manager) RemoveNoTrackHostPorts(namespace, name string) { + if !m.sharedCfg.InstallNoConntrackIptRules { + return + } + + podName := podAndNameSpace{podName: name, namespace: namespace} + + reconciled := make(chan struct{}) + m.reconcilerParams.delNoTrackHostPorts <- reconciliationRequest[podAndNameSpace]{podName, reconciled} + <-reconciled +} + func (m *Manager) InstallProxyRules(proxyPort uint16, name string) { reconciled := make(chan struct{}) m.reconcilerParams.proxies <- reconciliationRequest[proxyInfo]{proxyInfo{name, proxyPort}, reconciled} @@ -1126,12 +1197,12 @@ func (m *Manager) doInstallProxyRules(proxyPort uint16, name string) error { } if m.sharedCfg.EnableIPv4 { - if err := m.addProxyRules(ip4tables, "127.0.0.1", proxyPort, name); err != nil { + if err := m.addProxyRules(m.ip4tables, "127.0.0.1", proxyPort, name); err != nil { return err } } if m.sharedCfg.EnableIPv6 { - if err := m.addProxyRules(ip6tables, "::1", proxyPort, name); err != nil { + if err := m.addProxyRules(m.ip6tables, "::1", proxyPort, name); err != nil { return err } } @@ -1142,9 +1213,9 @@ func (m *Manager) doInstallProxyRules(proxyPort uint16, name string) error { // GetProxyPorts enumerates all existing TPROXY rules in the datapath installed earlier with // InstallProxyRules and returns all proxy ports found. 
func (m *Manager) GetProxyPorts() map[string]uint16 { - prog := ip4tables + prog := m.ip4tables if !m.sharedCfg.EnableIPv4 { - prog = ip6tables + prog = m.ip6tables } return m.doGetProxyPorts(prog) @@ -1200,12 +1271,12 @@ func (m *Manager) getDeliveryInterface(ifName string) string { func (m *Manager) installForwardChainRules(ifName, localDeliveryInterface, forwardChain string) error { if m.sharedCfg.EnableIPv4 { - if err := m.installForwardChainRulesIpX(ip4tables, ifName, localDeliveryInterface, forwardChain); err != nil { + if err := m.installForwardChainRulesIpX(m.ip4tables, ifName, localDeliveryInterface, forwardChain); err != nil { return err } } if m.sharedCfg.EnableIPv6 { - return m.installForwardChainRulesIpX(ip6tables, ifName, localDeliveryInterface, forwardChain) + return m.installForwardChainRulesIpX(m.ip6tables, ifName, localDeliveryInterface, forwardChain) } return nil @@ -1313,7 +1384,7 @@ func (m *Manager) installMasqueradeRules( devices = m.sharedCfg.MasqueradeInterfaces } family := netlink.FAMILY_V4 - if prog == ip6tables { + if prog == m.ip6tables { family = netlink.FAMILY_V6 } initialPass := true @@ -1459,7 +1530,7 @@ func (m *Manager) installMasqueradeRules( } loopbackAddr := "127.0.0.1" - if prog == ip6tables { + if prog == m.ip6tables { loopbackAddr = "::1" } @@ -1599,7 +1670,7 @@ func (m *Manager) doInstallRules(state desiredState, firstInit bool) error { func (m *Manager) installRules(state desiredState) error { // Install new rules for _, c := range ciliumChains { - if err := c.add(m.sharedCfg.EnableIPv4, m.sharedCfg.EnableIPv6); err != nil { + if err := c.add(m.sharedCfg.EnableIPv4, m.sharedCfg.EnableIPv6, m.ip4tables, m.ip6tables); err != nil { // do not return error for chain creation that are linked to disabled feeder rules if isDisabledChain(m.cfg.DisableIptablesFeederRules, c.hook) { m.logger.Warn( @@ -1613,7 +1684,7 @@ func (m *Manager) installRules(state desiredState) error { } } - if err := m.installTunnelNoTrackRules(ip4tables, ip6tables); err != nil { + if err := m.installTunnelNoTrackRules(m.ip4tables, m.ip6tables); err != nil { return fmt.Errorf("cannot install tunnel no track rules: %w", err) } @@ -1632,12 +1703,12 @@ func (m *Manager) installRules(state desiredState) error { } if m.sharedCfg.EnableIPv4 { - if err := m.installHostTrafficMarkRule(ip4tables); err != nil { + if err := m.installHostTrafficMarkRule(m.ip4tables); err != nil { return fmt.Errorf("cannot install host traffic mark rule: %w", err) } if m.sharedCfg.IptablesMasqueradingIPv4Enabled && state.localNodeInfo.internalIPv4 != nil { - if err := m.installMasqueradeRules(ip4tables, state.devices.UnsortedList(), localDeliveryInterface, + if err := m.installMasqueradeRules(m.ip4tables, state.devices.UnsortedList(), localDeliveryInterface, m.remoteSNATDstAddrExclusionCIDR(state.localNodeInfo.ipv4NativeRoutingCIDR, state.localNodeInfo.ipv4AllocCIDR), state.localNodeInfo.ipv4AllocCIDR, state.localNodeInfo.internalIPv4.String(), @@ -1648,12 +1719,12 @@ func (m *Manager) installRules(state desiredState) error { } if m.sharedCfg.EnableIPv6 { - if err := m.installHostTrafficMarkRule(ip6tables); err != nil { + if err := m.installHostTrafficMarkRule(m.ip6tables); err != nil { return fmt.Errorf("cannot install host traffic mark rule: %w", err) } if m.sharedCfg.IptablesMasqueradingIPv6Enabled && state.localNodeInfo.internalIPv6 != nil { - if err := m.installMasqueradeRules(ip6tables, state.devices.UnsortedList(), localDeliveryInterface, + if err := m.installMasqueradeRules(m.ip6tables, 
state.devices.UnsortedList(), localDeliveryInterface, m.remoteSNATDstAddrExclusionCIDR(state.localNodeInfo.ipv6NativeRoutingCIDR, state.localNodeInfo.ipv6AllocCIDR), state.localNodeInfo.ipv6AllocCIDR, state.localNodeInfo.internalIPv6.String(), @@ -1681,7 +1752,7 @@ func (m *Manager) installRules(state desiredState) error { podsCIDR := state.localNodeInfo.ipv4NativeRoutingCIDR if m.sharedCfg.InstallNoConntrackIptRules && podsCIDR != "" { - if err := m.addNoTrackPodTrafficRules(ip4tables, podsCIDR); err != nil { + if err := m.addNoTrackPodTrafficRules(m.ip4tables, podsCIDR); err != nil { return fmt.Errorf("cannot install pod traffic no CT rules: %w", err) } } @@ -1692,6 +1763,16 @@ func (m *Manager) installRules(state desiredState) error { } } + noTrackPorts := groupL4AddrsByProto(state.noTrackHostPorts.flatten().AsSlice()) + for _, proto := range noTrackSupportedProtos { + if ports, ok := noTrackPorts[proto]; ok && len(ports) > 0 { + if err := m.installHostNoTrackRules(proto, ports); err != nil { + return err + } + } + + } + for _, c := range ciliumChains { // do not install feeder for chains that are set to be disabled if isDisabledChain(m.cfg.DisableIptablesFeederRules, c.hook) { @@ -1702,7 +1783,7 @@ func (m *Manager) installRules(state desiredState) error { continue } - if err := c.installFeeder(m.sharedCfg.EnableIPv4, m.sharedCfg.EnableIPv6, m.cfg.PrependIptablesChains); err != nil { + if err := c.installFeeder(m.sharedCfg.EnableIPv4, m.sharedCfg.EnableIPv6, m.cfg.PrependIptablesChains, m.ip4tables, m.ip6tables); err != nil { return fmt.Errorf("cannot install feeder rule: %w", err) } } @@ -1744,7 +1825,7 @@ func (m *Manager) addCiliumAcceptXfrmRules() error { return nil } - insertAcceptXfrm := func(ipt *ipt, table, chain string) error { + insertAcceptXfrm := func(ipt iptablesInterface, table, chain string) error { matchFromIPSecEncrypt := fmt.Sprintf("%#08x/%#08x", linux_defaults.RouteMarkDecrypt, linux_defaults.RouteMarkMask) matchFromIPSecDecrypt := fmt.Sprintf("%#08x/%#08x", linux_defaults.RouteMarkEncrypt, linux_defaults.RouteMarkMask) @@ -1771,13 +1852,13 @@ func (m *Manager) addCiliumAcceptXfrmRules() error { switch chain.table { case "filter", "nat": if m.sharedCfg.EnableIPv4 { - if err := insertAcceptXfrm(ip4tables, chain.table, chain.name); err != nil { + if err := insertAcceptXfrm(m.ip4tables, chain.table, chain.name); err != nil { return err } } // ip6tables chain exists only if chain.ipv6 is true if m.sharedCfg.EnableIPv6 && chain.ipv6 { - if err := insertAcceptXfrm(ip6tables, chain.table, chain.name); err != nil { + if err := insertAcceptXfrm(m.ip6tables, chain.table, chain.name); err != nil { return err } } @@ -1788,12 +1869,12 @@ func (m *Manager) addCiliumAcceptXfrmRules() error { func (m *Manager) addCiliumNoTrackXfrmRules() (err error) { if m.sharedCfg.EnableIPv4 { - if err = m.ciliumNoTrackXfrmRules(ip4tables, "-I"); err != nil { + if err = m.ciliumNoTrackXfrmRules(m.ip4tables, "-I"); err != nil { return } } if m.sharedCfg.EnableIPv6 { - return m.ciliumNoTrackXfrmRules(ip6tables, "-I") + return m.ciliumNoTrackXfrmRules(m.ip6tables, "-I") } return nil } @@ -1805,9 +1886,9 @@ func (m *Manager) installNoTrackRules(addr netip.Addr, port uint16) error { return nil } - prog := ip4tables + prog := m.ip4tables if addr.Is6() { - prog = ip6tables + prog = m.ip6tables } for _, p := range noTrackPorts(port) { if err := m.endpointNoTrackRules(prog, "-A", addr.String(), p); err != nil { @@ -1824,9 +1905,9 @@ func (m *Manager) removeNoTrackRules(addr netip.Addr, port uint16) error 
{ return nil } - prog := ip4tables + prog := m.ip4tables if addr.Is6() { - prog = ip6tables + prog = m.ip6tables } for _, p := range noTrackPorts(port) { if err := m.endpointNoTrackRules(prog, "-D", addr.String(), p); err != nil { @@ -1889,7 +1970,7 @@ func (m *Manager) addCiliumENIRules() error { // Note: these rules need the xt_connmark module (iptables usually // loads it when required, unless loading modules after boot has been // disabled). - if err := ip4tables.runProg([]string{ + if err := m.ip4tables.runProg([]string{ "-t", "mangle", "-A", ciliumPreMangleChain, "-i", iface.Attrs().Name, @@ -1899,7 +1980,7 @@ func (m *Manager) addCiliumENIRules() error { return err } - return ip4tables.runProg([]string{ + return m.ip4tables.runProg([]string{ "-t", "mangle", "-A", ciliumPreMangleChain, "-i", "lxc+", @@ -1972,3 +2053,168 @@ func allEgressMasqueradeCmds(allocRange string, snatDstExclusionCIDR string, } return cmds } + +// hostNoTrackMultiPorts installs or removes a notrack rule matching multiple ports. +// the use case for this is to skip conntrack when a pod uses hostNetwork to improve performance (pps/rps) +// since conntrack affects the performance under load - which can occur under DDoS or traffic spikes for instance. +func (m *Manager) hostNoTrackMultiPorts(prog iptablesInterface, cmd, proto string, ports []uint16) error { + // sort the slice containing the ports, and turn them into strings + slices.Sort(ports) + strPorts := make([]string, len(ports)) + for i, p := range ports { + strPorts[i] = strconv.FormatUint(uint64(p), 10) + } + + if err := prog.runProg([]string{ + "-t", "raw", + cmd, ciliumPreRawChain, + "-p", strings.ToLower(proto), + "--match", "multiport", + "--dports", strings.Join(strPorts, ","), + "-m", "comment", "--comment", "cilium no-track-host-ports", + "-j", "CT", + "--notrack"}); err != nil { + return err + } + + if err := prog.runProg([]string{ + "-t", "raw", + cmd, ciliumOutputRawChain, + "-p", strings.ToLower(proto), + "--match", "multiport", + "--sports", strings.Join(strPorts, ","), + "-m", "comment", "--comment", "cilium no-track-host-ports return traffic", + "-j", "CT", + "--notrack"}); err != nil { + return err + } + + return nil +} + +// groupL4AddrsByProto iterates over a slice of ports and returns a map with the port numbers +// grouped by protocol. +func groupL4AddrsByProto(ports []lb.L4Addr) map[lb.L4Type][]uint16 { + result := make(map[lb.L4Type][]uint16) + + for _, p := range ports { + result[p.Protocol] = append(result[p.Protocol], p.Port) + } + + return result +} + +// replaceNoTrackHostPortRules replaces noTrackHostPort rules on a state change. the new ruleset is added, and the previous one is removed. +func (m *Manager) replaceNoTrackHostPortRules(oldPorts, newPorts map[lb.L4Type][]uint16) error { + for _, proto := range noTrackSupportedProtos { + oldP := set.NewSet(oldPorts[proto]...) + newP := set.NewSet(newPorts[proto]...) 
+ + if newP.Equal(oldP) { + continue + } + + if !newP.Empty() { + if err := m.installHostNoTrackRules(proto, newP.AsSlice()); err != nil { + return err + } + } + + if !oldP.Empty() { + if err := m.cleanupHostNoTrackRules(proto, oldP.AsSlice()); err != nil { + return err + } + } + } + + return nil +} + +// installHostNoTrackRules installs the hostNoTrack multiport rules +func (m *Manager) installHostNoTrackRules(proto lb.L4Type, p []uint16) error { + if m.sharedCfg.EnableIPv4 { + if err := m.hostNoTrackMultiPorts(m.ip4tables, "-A", proto, p); err != nil { + return err + } + } + + if m.sharedCfg.EnableIPv6 { + if err := m.hostNoTrackMultiPorts(m.ip6tables, "-A", proto, p); err != nil { + return err + } + } + + return nil +} + +// cleanupHostNoTrackRules cleans up the hostNoTrack multiport rules +func (m *Manager) cleanupHostNoTrackRules(proto lb.L4Type, p []uint16) error { + if m.sharedCfg.EnableIPv4 { + if err := m.hostNoTrackMultiPorts(m.ip4tables, "-D", proto, p); err != nil { + return err + } + } + + if m.sharedCfg.EnableIPv6 { + if err := m.hostNoTrackMultiPorts(m.ip6tables, "-D", proto, p); err != nil { + return err + } + } + + return nil +} + +// removeNoTrackHostPorts removes notrack rules if the global set changes after removing an entry for the pod. +func (m *Manager) removeNoTrackHostPorts(currentState noTrackHostPortsByPod, podName podAndNameSpace) error { + oldPorts := groupL4AddrsByProto(currentState.flatten().AsSlice()) + delete(currentState, podName) + newPorts := groupL4AddrsByProto(currentState.flatten().AsSlice()) + + return m.replaceNoTrackHostPortRules(oldPorts, newPorts) +} + +// setNoTrackHostPorts ensures that the notrack rules for host network pods are in place. +// It adds the new ruleset and removes the previous one if the global set of ports has changed. +func (m *Manager) setNoTrackHostPorts(currentState noTrackHostPortsByPod, podName podAndNameSpace, ports []string) error { + parsedPorts := make([]lb.L4Addr, 0, len(ports)) + + for _, p := range ports { + if p == "" { + continue + } + + parsed, err := lb.L4AddrFromString(p) + if err != nil { + return fmt.Errorf("failed to parse port/proto for %s: %w", p, err) + } + + switch parsed.Protocol { + case lb.TCP, lb.UDP: + parsedPorts = append(parsedPorts, parsed) + default: + return fmt.Errorf("protocol %s is not supported for no-track-host-ports", parsed.Protocol) + } + } + + newSet := set.NewSet(parsedPorts...)
+ if newSet.Empty() { + return m.removeNoTrackHostPorts(currentState, podName) + } + + currentPodPorts, ok := currentState[podName] + if ok && currentPodPorts.Equal(newSet) { + // no changes + return nil + } + + // grab the previously installed state + oldPorts := groupL4AddrsByProto(currentState.flatten().AsSlice()) + + // update current state, since we now know it has changed (or is a new entry altogether) + currentState[podName] = newSet + + newPorts := groupL4AddrsByProto(currentState.flatten().AsSlice()) + + return m.replaceNoTrackHostPortRules(oldPorts, newPorts) + +} diff --git a/pkg/datapath/iptables/iptables_test.go b/pkg/datapath/iptables/iptables_test.go index 2a565c5af456d..28fe50a7fd412 100644 --- a/pkg/datapath/iptables/iptables_test.go +++ b/pkg/datapath/iptables/iptables_test.go @@ -862,3 +862,176 @@ func TestTunnelNoTrackRulesTunnelingDisabled(t *testing.T) { t.Error(err) } } + +func TestNoTrackHostPorts(t *testing.T) { + mockIp4tables := &mockIptables{t: t, prog: "iptables"} + mockIp6tables := &mockIptables{t: t, prog: "ip6tables"} + + testMgr := &Manager{ + haveIp6tables: false, + haveSocketMatch: true, + haveBPFSocketAssign: false, + ipEarlyDemuxDisabled: false, + sharedCfg: SharedConfig{ + EnableIPv4: true, + EnableIPv6: true, + }, + ip4tables: mockIp4tables, + ip6tables: mockIp6tables, + } + + testState := make(noTrackHostPortsByPod) + + var testPod, testPod2 podAndNameSpace + + t.Run("test adding notrack host port", func(t *testing.T) { + testPod = podAndNameSpace{namespace: "testns", podName: "testpod1"} + ports := []string{"443/tcp"} + + mockIp4tables.expectations = append(mockIp4tables.expectations, []expectation{ + {args: "-t raw -A CILIUM_PRE_raw -p tcp --match multiport --dports 443 -m comment --comment cilium no-track-host-ports -j CT --notrack"}, + {args: "-t raw -A CILIUM_OUTPUT_raw -p tcp --match multiport --sports 443 -m comment --comment cilium no-track-host-ports return traffic -j CT --notrack"}, + }...) + + mockIp6tables.expectations = append(mockIp6tables.expectations, []expectation{ + {args: "-t raw -A CILIUM_PRE_raw -p tcp --match multiport --dports 443 -m comment --comment cilium no-track-host-ports -j CT --notrack"}, + {args: "-t raw -A CILIUM_OUTPUT_raw -p tcp --match multiport --sports 443 -m comment --comment cilium no-track-host-ports return traffic -j CT --notrack"}, + }...) + + assert.NoError(t, testMgr.setNoTrackHostPorts(testState, testPod, ports)) + assert.Contains(t, testState, testPod) + + // add a second time does not error out or trigger iptables commands + assert.NoError(t, testMgr.setNoTrackHostPorts(testState, testPod, ports)) + + // add same port entry for another pod, make sure we dont see any new iptables commands + testPod2 = podAndNameSpace{namespace: "testns", podName: "testpod2"} + assert.NoError(t, testMgr.setNoTrackHostPorts(testState, testPod2, ports)) + + assert.Contains(t, testState, testPod) + assert.Contains(t, testState, testPod2) + + // add another port. 
we expect to see the new rules being added, and then the 2 previous rules being deleted + mockIp4tables.expectations = append(mockIp4tables.expectations, []expectation{ + {args: "-t raw -A CILIUM_PRE_raw -p tcp --match multiport --dports 443,999 -m comment --comment cilium no-track-host-ports -j CT --notrack"}, + {args: "-t raw -A CILIUM_OUTPUT_raw -p tcp --match multiport --sports 443,999 -m comment --comment cilium no-track-host-ports return traffic -j CT --notrack"}, + + {args: "-t raw -D CILIUM_PRE_raw -p tcp --match multiport --dports 443 -m comment --comment cilium no-track-host-ports -j CT --notrack"}, + {args: "-t raw -D CILIUM_OUTPUT_raw -p tcp --match multiport --sports 443 -m comment --comment cilium no-track-host-ports return traffic -j CT --notrack"}, + }...) + + mockIp6tables.expectations = append(mockIp6tables.expectations, []expectation{ + {args: "-t raw -A CILIUM_PRE_raw -p tcp --match multiport --dports 443,999 -m comment --comment cilium no-track-host-ports -j CT --notrack"}, + {args: "-t raw -A CILIUM_OUTPUT_raw -p tcp --match multiport --sports 443,999 -m comment --comment cilium no-track-host-ports return traffic -j CT --notrack"}, + + {args: "-t raw -D CILIUM_PRE_raw -p tcp --match multiport --dports 443 -m comment --comment cilium no-track-host-ports -j CT --notrack"}, + {args: "-t raw -D CILIUM_OUTPUT_raw -p tcp --match multiport --sports 443 -m comment --comment cilium no-track-host-ports return traffic -j CT --notrack"}, + }...) + + assert.NoError(t, testMgr.setNoTrackHostPorts(testState, testPod2, []string{"999/tcp", "443/tcp"})) + assert.NoError(t, mockIp4tables.checkExpectations()) + assert.NoError(t, mockIp6tables.checkExpectations()) + }) + + t.Run("test changing the port", func(t *testing.T) { + mockIp4tables.expectations = []expectation{ + {args: "-t raw -A CILIUM_PRE_raw -p tcp --match multiport --dports 443 -m comment --comment cilium no-track-host-ports -j CT --notrack"}, + {args: "-t raw -A CILIUM_OUTPUT_raw -p tcp --match multiport --sports 443 -m comment --comment cilium no-track-host-ports return traffic -j CT --notrack"}, + + {args: "-t raw -D CILIUM_PRE_raw -p tcp --match multiport --dports 443,999 -m comment --comment cilium no-track-host-ports -j CT --notrack"}, + {args: "-t raw -D CILIUM_OUTPUT_raw -p tcp --match multiport --sports 443,999 -m comment --comment cilium no-track-host-ports return traffic -j CT --notrack"}, + + {args: "-t raw -A CILIUM_PRE_raw -p udp --match multiport --dports 443 -m comment --comment cilium no-track-host-ports -j CT --notrack"}, + {args: "-t raw -A CILIUM_OUTPUT_raw -p udp --match multiport --sports 443 -m comment --comment cilium no-track-host-ports return traffic -j CT --notrack"}, + } + + mockIp6tables.expectations = []expectation{ + {args: "-t raw -A CILIUM_PRE_raw -p tcp --match multiport --dports 443 -m comment --comment cilium no-track-host-ports -j CT --notrack"}, + {args: "-t raw -A CILIUM_OUTPUT_raw -p tcp --match multiport --sports 443 -m comment --comment cilium no-track-host-ports return traffic -j CT --notrack"}, + + {args: "-t raw -D CILIUM_PRE_raw -p tcp --match multiport --dports 443,999 -m comment --comment cilium no-track-host-ports -j CT --notrack"}, + {args: "-t raw -D CILIUM_OUTPUT_raw -p tcp --match multiport --sports 443,999 -m comment --comment cilium no-track-host-ports return traffic -j CT --notrack"}, + + {args: "-t raw -A CILIUM_PRE_raw -p udp --match multiport --dports 443 -m comment --comment cilium no-track-host-ports -j CT --notrack"}, + {args: "-t raw -A CILIUM_OUTPUT_raw -p udp 
--match multiport --sports 443 -m comment --comment cilium no-track-host-ports return traffic -j CT --notrack"}, + } + + assert.NoError(t, testMgr.setNoTrackHostPorts(testState, testPod2, []string{"443/udp"})) + assert.NoError(t, mockIp4tables.checkExpectations()) + assert.NoError(t, mockIp6tables.checkExpectations()) + }) + + t.Run("test empty ports annotation", func(t *testing.T) { + testPod3 := podAndNameSpace{namespace: "123", podName: "321"} + mockIp4tables.expectations = []expectation{ + {args: "-t raw -A CILIUM_PRE_raw -p udp --match multiport --dports 443,8123 -m comment --comment cilium no-track-host-ports -j CT --notrack"}, + {args: "-t raw -A CILIUM_OUTPUT_raw -p udp --match multiport --sports 443,8123 -m comment --comment cilium no-track-host-ports return traffic -j CT --notrack"}, + + {args: "-t raw -D CILIUM_PRE_raw -p udp --match multiport --dports 443 -m comment --comment cilium no-track-host-ports -j CT --notrack"}, + {args: "-t raw -D CILIUM_OUTPUT_raw -p udp --match multiport --sports 443 -m comment --comment cilium no-track-host-ports return traffic -j CT --notrack"}, + } + + mockIp6tables.expectations = []expectation{ + {args: "-t raw -A CILIUM_PRE_raw -p udp --match multiport --dports 443,8123 -m comment --comment cilium no-track-host-ports -j CT --notrack"}, + {args: "-t raw -A CILIUM_OUTPUT_raw -p udp --match multiport --sports 443,8123 -m comment --comment cilium no-track-host-ports return traffic -j CT --notrack"}, + + {args: "-t raw -D CILIUM_PRE_raw -p udp --match multiport --dports 443 -m comment --comment cilium no-track-host-ports -j CT --notrack"}, + {args: "-t raw -D CILIUM_OUTPUT_raw -p udp --match multiport --sports 443 -m comment --comment cilium no-track-host-ports return traffic -j CT --notrack"}, + } + + assert.NoError(t, testMgr.setNoTrackHostPorts(testState, testPod3, []string{"8123/udp"})) + + mockIp4tables.expectations = append(mockIp4tables.expectations, []expectation{ + {args: "-t raw -A CILIUM_PRE_raw -p udp --match multiport --dports 443 -m comment --comment cilium no-track-host-ports -j CT --notrack"}, + {args: "-t raw -A CILIUM_OUTPUT_raw -p udp --match multiport --sports 443 -m comment --comment cilium no-track-host-ports return traffic -j CT --notrack"}, + + {args: "-t raw -D CILIUM_PRE_raw -p udp --match multiport --dports 443,8123 -m comment --comment cilium no-track-host-ports -j CT --notrack"}, + {args: "-t raw -D CILIUM_OUTPUT_raw -p udp --match multiport --sports 443,8123 -m comment --comment cilium no-track-host-ports return traffic -j CT --notrack"}, + }...) + + mockIp6tables.expectations = append(mockIp6tables.expectations, []expectation{ + {args: "-t raw -A CILIUM_PRE_raw -p udp --match multiport --dports 443 -m comment --comment cilium no-track-host-ports -j CT --notrack"}, + {args: "-t raw -A CILIUM_OUTPUT_raw -p udp --match multiport --sports 443 -m comment --comment cilium no-track-host-ports return traffic -j CT --notrack"}, + + {args: "-t raw -D CILIUM_PRE_raw -p udp --match multiport --dports 443,8123 -m comment --comment cilium no-track-host-ports -j CT --notrack"}, + {args: "-t raw -D CILIUM_OUTPUT_raw -p udp --match multiport --sports 443,8123 -m comment --comment cilium no-track-host-ports return traffic -j CT --notrack"}, + }...) 
+ + // empty port should trigger a delete-like behaviour + assert.NoError(t, testMgr.setNoTrackHostPorts(testState, testPod3, strings.Split("", "/"))) + + assert.NoError(t, mockIp4tables.checkExpectations()) + assert.NoError(t, mockIp6tables.checkExpectations()) + }) + + t.Run("test deleting notrack host port", func(t *testing.T) { + mockIp4tables.expectations = []expectation{ + {args: "-t raw -D CILIUM_PRE_raw -p udp --match multiport --dports 443 -m comment --comment cilium no-track-host-ports -j CT --notrack"}, + {args: "-t raw -D CILIUM_OUTPUT_raw -p udp --match multiport --sports 443 -m comment --comment cilium no-track-host-ports return traffic -j CT --notrack"}, + } + + mockIp6tables.expectations = []expectation{ + {args: "-t raw -D CILIUM_PRE_raw -p udp --match multiport --dports 443 -m comment --comment cilium no-track-host-ports -j CT --notrack"}, + {args: "-t raw -D CILIUM_OUTPUT_raw -p udp --match multiport --sports 443 -m comment --comment cilium no-track-host-ports return traffic -j CT --notrack"}, + } + + assert.NoError(t, testMgr.removeNoTrackHostPorts(testState, testPod2)) + + // now we update the previous one with an empty set. should cause rules to be deleted since this pod is the last reference for port 443 + mockIp4tables.expectations = append(mockIp4tables.expectations, []expectation{ + {args: "-t raw -D CILIUM_PRE_raw -p tcp --match multiport --dports 443 -m comment --comment cilium no-track-host-ports -j CT --notrack"}, + {args: "-t raw -D CILIUM_OUTPUT_raw -p tcp --match multiport --sports 443 -m comment --comment cilium no-track-host-ports return traffic -j CT --notrack"}, + }...) + + mockIp6tables.expectations = append(mockIp6tables.expectations, []expectation{ + {args: "-t raw -D CILIUM_PRE_raw -p tcp --match multiport --dports 443 -m comment --comment cilium no-track-host-ports -j CT --notrack"}, + {args: "-t raw -D CILIUM_OUTPUT_raw -p tcp --match multiport --sports 443 -m comment --comment cilium no-track-host-ports return traffic -j CT --notrack"}, + }...) 
+ + assert.NoError(t, testMgr.setNoTrackHostPorts(testState, testPod, nil)) + + assert.NoError(t, mockIp4tables.checkExpectations()) + assert.NoError(t, mockIp6tables.checkExpectations()) + assert.Empty(t, testState) + }) +} diff --git a/pkg/datapath/iptables/reconciler.go b/pkg/datapath/iptables/reconciler.go index 6c00ad3b07293..1b2f5a15c8de4 100644 --- a/pkg/datapath/iptables/reconciler.go +++ b/pkg/datapath/iptables/reconciler.go @@ -24,10 +24,11 @@ import ( type desiredState struct { installRules bool - devices sets.Set[string] - localNodeInfo localNodeInfo - proxies map[string]proxyInfo - noTrackPods sets.Set[noTrackPodInfo] + devices sets.Set[string] + localNodeInfo localNodeInfo + proxies map[string]proxyInfo + noTrackPods sets.Set[noTrackPodInfo] + noTrackHostPorts noTrackHostPortsByPod } type localNodeInfo struct { @@ -112,6 +113,11 @@ type noTrackPodInfo struct { port uint16 } +type noTrackHostPortsPodInfo struct { + podKey podAndNameSpace + ports []string +} + func reconciliationLoop( ctx context.Context, log *slog.Logger, @@ -122,6 +128,8 @@ func reconciliationLoop( updateProxyRules func(proxyPort uint16, name string) error, installNoTrackRules func(addr netip.Addr, port uint16) error, removeNoTrackRules func(addr netip.Addr, port uint16) error, + addNoTrackHostPorts func(currentState noTrackHostPortsByPod, podName podAndNameSpace, ports []string) error, + removeNoTrackHostPorts func(currentState noTrackHostPortsByPod, podName podAndNameSpace) error, ) error { // The minimum interval between reconciliation attempts const minReconciliationInterval = 200 * time.Millisecond @@ -136,9 +144,10 @@ func reconciliationLoop( fullLogLimiter := logging.NewLimiter(10*time.Second, 3) state := desiredState{ - installRules: installIptRules, - proxies: make(map[string]proxyInfo), - noTrackPods: sets.New[noTrackPodInfo](), + installRules: installIptRules, + proxies: make(map[string]proxyInfo), + noTrackPods: sets.New[noTrackPodInfo](), + noTrackHostPorts: make(noTrackHostPortsByPod), } ctx, cancel := context.WithCancel(ctx) @@ -293,6 +302,53 @@ stop: } else { close(req.updated) } + + case req, ok := <-params.addNoTrackHostPorts: + if !ok { + break stop + } + + if firstInit { + stateChanged = true + updatedChs = append(updatedChs, req.updated) + continue + } + + if err := addNoTrackHostPorts(state.noTrackHostPorts, req.info.podKey, req.info.ports); err != nil { + if partialLogLimiter.Allow() { + log.Error("failed to set up no-track-host-ports, will retry a full reconciliation", logfields.Error, err) + } + + // incremental rules update failed, schedule a full iptables reconciliation + stateChanged = true + updatedChs = append(updatedChs, req.updated) + } else { + close(req.updated) + } + + case req, ok := <-params.delNoTrackHostPorts: + if !ok { + break stop + } + + if firstInit { + stateChanged = true + updatedChs = append(updatedChs, req.updated) + continue + } + + if err := removeNoTrackHostPorts(state.noTrackHostPorts, req.info); err != nil { + if partialLogLimiter.Allow() { + log.Error("failed to remove no-track-host-ports, will retry a full reconciliation", logfields.Error, err) + } + + // incremental rules update failed, schedule a full iptables reconciliation + stateChanged = true + updatedChs = append(updatedChs, req.updated) + } else { + close(req.updated) + } + case <-refresher.C(): stateChanged = true case <-ticker.C(): @@ -343,6 +399,10 @@ stop: } for range params.delNoTrackPod { } + for range params.addNoTrackHostPorts { + } + for range params.delNoTrackHostPorts { + } return nil } 
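To make the iptables changes above easier to follow: the no-track-host-ports path ultimately installs a pair of multiport NOTRACK rules in the raw table, one matching destination ports on the ingress path (CILIUM_PRE_raw) and one matching source ports on the return path (CILIUM_OUTPUT_raw). The standalone Go sketch below mirrors the argument layout of the hostNoTrackMultiPorts helper added in pkg/datapath/iptables/iptables.go; the buildNoTrackArgs helper, the main function, and the printed commands are illustrative only and not part of this patch.

package main

import (
	"fmt"
	"slices"
	"strconv"
	"strings"
)

// buildNoTrackArgs assembles the raw-table NOTRACK rule for a set of host ports.
// portFlag is "--dports" for the CILIUM_PRE_raw rule and "--sports" for the
// CILIUM_OUTPUT_raw return-traffic rule. The patch additionally uses a distinct
// "... return traffic" comment on the return-path rule; that detail is omitted
// here for brevity.
func buildNoTrackArgs(chain, portFlag, proto string, ports []uint16) []string {
	// Sorting keeps the generated rule stable across reconciliations, as in the patch.
	slices.Sort(ports)
	strPorts := make([]string, len(ports))
	for i, p := range ports {
		strPorts[i] = strconv.FormatUint(uint64(p), 10)
	}
	return []string{
		"-t", "raw",
		"-A", chain,
		"-p", strings.ToLower(proto),
		"--match", "multiport",
		portFlag, strings.Join(strPorts, ","),
		"-m", "comment", "--comment", "cilium no-track-host-ports",
		"-j", "CT", "--notrack",
	}
}

func main() {
	ports := []uint16{999, 443}
	fmt.Println("iptables", strings.Join(buildNoTrackArgs("CILIUM_PRE_raw", "--dports", "TCP", ports), " "))
	fmt.Println("iptables", strings.Join(buildNoTrackArgs("CILIUM_OUTPUT_raw", "--sports", "TCP", ports), " "))
}

Because all pods' annotated ports are flattened into a single multiport rule per protocol rather than one rule per pod, the raw chains stay short; this is also why replaceNoTrackHostPortRules recomputes the flattened set and swaps the whole ruleset whenever any pod's set of ports changes.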
diff --git a/pkg/datapath/iptables/reconciler_test.go b/pkg/datapath/iptables/reconciler_test.go index a628b7bf549f0..14dd8dc880d3c 100644 --- a/pkg/datapath/iptables/reconciler_test.go +++ b/pkg/datapath/iptables/reconciler_test.go @@ -20,8 +20,10 @@ import ( baseclocktest "k8s.io/utils/clock/testing" "github.com/cilium/cilium/pkg/cidr" + "github.com/cilium/cilium/pkg/container/set" "github.com/cilium/cilium/pkg/datapath/tables" "github.com/cilium/cilium/pkg/hive" + lb "github.com/cilium/cilium/pkg/loadbalancer" "github.com/cilium/cilium/pkg/lock" "github.com/cilium/cilium/pkg/node" "github.com/cilium/cilium/pkg/node/addressing" @@ -58,13 +60,15 @@ func TestReconciliationLoop(t *testing.T) { store = store_ health = health_.NewScope("iptables-reconciler-test") params = &reconcilerParams{ - clock: clock, - localNodeStore: store_, - db: db_, - devices: devices_, - proxies: make(chan reconciliationRequest[proxyInfo]), - addNoTrackPod: make(chan reconciliationRequest[noTrackPodInfo]), - delNoTrackPod: make(chan reconciliationRequest[noTrackPodInfo]), + clock: clock, + localNodeStore: store_, + db: db_, + devices: devices_, + proxies: make(chan reconciliationRequest[proxyInfo]), + addNoTrackPod: make(chan reconciliationRequest[noTrackPodInfo]), + delNoTrackPod: make(chan reconciliationRequest[noTrackPodInfo]), + addNoTrackHostPorts: make(chan reconciliationRequest[noTrackHostPortsPodInfo]), + delNoTrackHostPorts: make(chan reconciliationRequest[podAndNameSpace]), } }), ) @@ -112,6 +116,35 @@ func TestReconciliationLoop(t *testing.T) { return nil } + setNoTrackHostPortsFunc := func(currentState noTrackHostPortsByPod, pod podAndNameSpace, ports []string) error { + mu.Lock() + defer mu.Unlock() + + parsedPorts := make([]lb.L4Addr, 0, len(ports)) + + for _, p := range ports { + parsed, err := lb.L4AddrFromString(p) + + if err != nil { + return fmt.Errorf("failed to parse port/proto for %s: %w", p, err) + } + + parsedPorts = append(parsedPorts, parsed) + } + + state.noTrackHostPorts[pod] = set.NewSet(parsedPorts...) 
+ + return nil + } + + removeNoTrackHostPortsFunc := func(currentState noTrackHostPortsByPod, pod podAndNameSpace) error { + mu.Lock() + defer mu.Unlock() + state.noTrackHostPorts = currentState.exclude(pod) + assert.NotContains(t, state.noTrackHostPorts, pod) + return nil + } + testCases := []struct { name string action func() @@ -330,6 +363,106 @@ func TestReconciliationLoop(t *testing.T) { ), }, }, + { + name: "add no track host port", + action: func() { + params.addNoTrackHostPorts <- reconciliationRequest[noTrackHostPortsPodInfo]{ + info: noTrackHostPortsPodInfo{podKey: podAndNameSpace{podName: "mytest1", namespace: "mytestns"}, ports: []string{"443/tcp"}}, + updated: make(chan struct{}), + } + }, + expected: desiredState{ + installRules: true, + devices: sets.New("test-1", "test-2"), + localNodeInfo: localNodeInfo{ + internalIPv4: net.ParseIP("2.2.2.2"), + ipv4AllocCIDR: cidr.MustParseCIDR("6.6.6.0/24").String(), + ipv6AllocCIDR: cidr.MustParseCIDR("3002:bbbb::/96").String(), + }, + proxies: map[string]proxyInfo{ + "proxy-test-1": { + name: "proxy-test-1", + port: 9090, + }, + "proxy-test-2": { + name: "proxy-test-2", + port: 9091, + }, + }, + noTrackPods: sets.New( + noTrackPodInfo{netip.MustParseAddr("11.22.33.44"), 10002}, + ), + noTrackHostPorts: noTrackHostPortsByPod{ + podAndNameSpace{podName: "mytest1", namespace: "mytestns"}: set.NewSet(lb.L4Addr{Protocol: "TCP", Port: 443}), + }, + }, + }, + { + name: "change no track host port", + action: func() { + params.addNoTrackHostPorts <- reconciliationRequest[noTrackHostPortsPodInfo]{ + info: noTrackHostPortsPodInfo{podKey: podAndNameSpace{podName: "mytest1", namespace: "mytestns"}, ports: []string{"443/udp"}}, + updated: make(chan struct{}), + } + }, + expected: desiredState{ + installRules: true, + devices: sets.New("test-1", "test-2"), + localNodeInfo: localNodeInfo{ + internalIPv4: net.ParseIP("2.2.2.2"), + ipv4AllocCIDR: cidr.MustParseCIDR("6.6.6.0/24").String(), + ipv6AllocCIDR: cidr.MustParseCIDR("3002:bbbb::/96").String(), + }, + proxies: map[string]proxyInfo{ + "proxy-test-1": { + name: "proxy-test-1", + port: 9090, + }, + "proxy-test-2": { + name: "proxy-test-2", + port: 9091, + }, + }, + noTrackPods: sets.New( + noTrackPodInfo{netip.MustParseAddr("11.22.33.44"), 10002}, + ), + noTrackHostPorts: noTrackHostPortsByPod{ + podAndNameSpace{podName: "mytest1", namespace: "mytestns"}: set.NewSet(lb.L4Addr{Protocol: "UDP", Port: 443}), + }, + }, + }, + { + name: "delete no track host port", + action: func() { + params.delNoTrackHostPorts <- reconciliationRequest[podAndNameSpace]{ + info: podAndNameSpace{podName: "mytest1", namespace: "mytestns"}, + updated: make(chan struct{}), + } + }, + expected: desiredState{ + installRules: true, + devices: sets.New("test-1", "test-2"), + localNodeInfo: localNodeInfo{ + internalIPv4: net.ParseIP("2.2.2.2"), + ipv4AllocCIDR: cidr.MustParseCIDR("6.6.6.0/24").String(), + ipv6AllocCIDR: cidr.MustParseCIDR("3002:bbbb::/96").String(), + }, + proxies: map[string]proxyInfo{ + "proxy-test-1": { + name: "proxy-test-1", + port: 9090, + }, + "proxy-test-2": { + name: "proxy-test-2", + port: 9091, + }, + }, + noTrackPods: sets.New( + noTrackPodInfo{netip.MustParseAddr("11.22.33.44"), 10002}, + ), + noTrackHostPorts: noTrackHostPortsByPod{}, + }, + }, } ctx, cancel := context.WithCancel(context.Background()) @@ -345,7 +478,12 @@ func TestReconciliationLoop(t *testing.T) { errs := make(chan error) go func() { defer close(errs) - errs <- reconciliationLoop(ctx, tlog, health, true, params, updateFunc, 
updateProxyFunc, installNoTrackFunc, removeNoTrackFunc) + errs <- reconciliationLoop( + ctx, tlog, health, true, + params, updateFunc, updateProxyFunc, + installNoTrackFunc, removeNoTrackFunc, + setNoTrackHostPortsFunc, removeNoTrackHostPortsFunc, + ) }() // wait for reconciler to react to the initial state @@ -379,7 +517,7 @@ func TestReconciliationLoop(t *testing.T) { return false } return true - }, 10*time.Second, 10*time.Millisecond, "expected state not reached. %v", tc.expected) + }, 10*time.Second, 1*time.Second, "expected state not reached. %v", tc.expected) }) } @@ -408,6 +546,8 @@ func TestReconciliationLoop(t *testing.T) { close(params.proxies) close(params.addNoTrackPod) close(params.delNoTrackPod) + close(params.addNoTrackHostPorts) + close(params.delNoTrackHostPorts) cancel() assert.NoError(t, <-errs) } @@ -434,6 +574,19 @@ func assertIptablesState(current, expected desiredState) error { return fmt.Errorf("expected no tracking pods info to be %v, found %v", expected.noTrackPods.UnsortedList(), current.noTrackPods.UnsortedList()) } + for k, v := range current.noTrackHostPorts { + if !v.Equal(expected.noTrackHostPorts[k]) { + return fmt.Errorf("expected no-host-track-ports info to be %v, found %v", + expected.noTrackHostPorts[k].AsSlice(), v.AsSlice()) + } + } + for k, v := range expected.noTrackHostPorts { + if !v.Equal(current.noTrackHostPorts[k]) { + return fmt.Errorf("expected no-host-track-ports info to be %v, found %v", + v.AsSlice(), current.noTrackHostPorts[k].AsSlice()) + } + } + return nil } @@ -442,6 +595,12 @@ func (s desiredState) deepCopy() desiredState { copy(ipv4, s.localNodeInfo.internalIPv4) ipv6 := make(net.IP, len(s.localNodeInfo.internalIPv6)) copy(ipv6, s.localNodeInfo.internalIPv6) + + noTrackHostPorts := make(noTrackHostPortsByPod, len(s.noTrackHostPorts)) + for k, v := range s.noTrackHostPorts { + noTrackHostPorts[k] = v.Clone() + } + return desiredState{ installRules: s.installRules, devices: s.devices.Clone(), @@ -453,7 +612,8 @@ func (s desiredState) deepCopy() desiredState { ipv4NativeRoutingCIDR: s.localNodeInfo.ipv4NativeRoutingCIDR, ipv6NativeRoutingCIDR: s.localNodeInfo.ipv6NativeRoutingCIDR, }, - proxies: maps.Clone(s.proxies), - noTrackPods: s.noTrackPods.Clone(), + proxies: maps.Clone(s.proxies), + noTrackPods: s.noTrackPods.Clone(), + noTrackHostPorts: noTrackHostPorts, } } diff --git a/pkg/datapath/linux/config/config.go b/pkg/datapath/linux/config/config.go index cce4b322b844b..afb8a6485b869 100644 --- a/pkg/datapath/linux/config/config.go +++ b/pkg/datapath/linux/config/config.go @@ -48,6 +48,7 @@ import ( "github.com/cilium/cilium/pkg/maps/nodemap" "github.com/cilium/cilium/pkg/maps/policymap" "github.com/cilium/cilium/pkg/maps/vtep" + "github.com/cilium/cilium/pkg/maps/vtep_policy" "github.com/cilium/cilium/pkg/netns" "github.com/cilium/cilium/pkg/option" wgtypes "github.com/cilium/cilium/pkg/wireguard/types" @@ -65,7 +66,6 @@ type HeaderfileWriter struct { nodeExtraDefineFns []dpdef.Fn sysctl sysctl.Sysctl kprCfg kpr.KPRConfig - ipsecConfig datapath.IPsecConfig } func NewHeaderfileWriter(p WriterParams) (datapath.ConfigWriter, error) { @@ -83,7 +83,6 @@ func NewHeaderfileWriter(p WriterParams) (datapath.ConfigWriter, error) { log: p.Log, sysctl: p.Sysctl, kprCfg: p.KPRConfig, - ipsecConfig: p.IPSecConfig, }, nil } @@ -176,7 +175,6 @@ func (h *HeaderfileWriter) WriteNodeConfig(w io.Writer, cfg *datapath.LocalNodeC cDefinesMap["LOCAL_NODE_ID"] = fmt.Sprintf("%d", identity.ReservedIdentityRemoteNode) cDefinesMap["REMOTE_NODE_ID"] = 
fmt.Sprintf("%d", identity.GetReservedID(labels.IDNameRemoteNode)) cDefinesMap["KUBE_APISERVER_NODE_ID"] = fmt.Sprintf("%d", identity.GetReservedID(labels.IDNameKubeAPIServer)) - cDefinesMap["ENCRYPTED_OVERLAY_ID"] = fmt.Sprintf("%d", identity.GetReservedID(labels.IDNameEncryptedOverlay)) cDefinesMap["CILIUM_LB_SERVICE_MAP_MAX_ENTRIES"] = fmt.Sprintf("%d", cfg.LBConfig.LBServiceMapEntries) cDefinesMap["CILIUM_LB_BACKENDS_MAP_MAX_ENTRIES"] = fmt.Sprintf("%d", cfg.LBConfig.LBBackendMapEntries) cDefinesMap["CILIUM_LB_REV_NAT_MAP_MAX_ENTRIES"] = fmt.Sprintf("%d", cfg.LBConfig.LBRevNatEntries) @@ -240,10 +238,6 @@ func (h *HeaderfileWriter) WriteNodeConfig(w io.Writer, cfg *datapath.LocalNodeC if cfg.EnableIPSec { cDefinesMap["ENABLE_IPSEC"] = "1" - - if h.ipsecConfig.EncryptedOverlayEnabled() { - cDefinesMap["ENABLE_ENCRYPTED_OVERLAY"] = "1" - } } if cfg.EnableWireguard { @@ -595,6 +589,7 @@ func (h *HeaderfileWriter) WriteNodeConfig(w io.Writer, cfg *datapath.LocalNodeC } cDefinesMap["VTEP_MAP_SIZE"] = fmt.Sprintf("%d", vtep.MaxEntries) + cDefinesMap["VTEP_POLICY_MAP_SIZE"] = fmt.Sprintf("%d", vtep_policy.MaxEntries) vlanFilter, err := vlanFilterMacros(nativeDevices) if err != nil { diff --git a/pkg/datapath/linux/ipsec.go b/pkg/datapath/linux/ipsec.go index 6f1c2feca0bf5..3afd20dd392c3 100644 --- a/pkg/datapath/linux/ipsec.go +++ b/pkg/datapath/linux/ipsec.go @@ -733,17 +733,35 @@ func (n *linuxNodeHandler) removeEncryptRules() error { Protocol: linux_defaults.RTProto, } - rule.Mark = linux_defaults.RouteMarkDecrypt + rule.Mark = linux_defaults.RouteMarkEncrypt if err := route.DeleteRule(netlink.FAMILY_V4, rule); err != nil { if !os.IsNotExist(err) { - return fmt.Errorf("delete previous IPv4 decrypt rule failed: %w", err) + return fmt.Errorf("delete previous IPv4 encrypt rule failed: %w", err) } } rule.Mark = linux_defaults.RouteMarkEncrypt + if err := route.DeleteRule(netlink.FAMILY_V6, rule); err != nil { + if !os.IsNotExist(err) && !errors.Is(err, unix.EAFNOSUPPORT) { + return fmt.Errorf("delete previous IPv6 encrypt rule failed: %w", err) + } + } + return nil + +} + +func (n *linuxNodeHandler) removeDecryptRules() error { + rule := route.Rule{ + Priority: 1, + Mask: linux_defaults.RouteMarkMask, + Table: linux_defaults.RouteTableIPSec, + Protocol: linux_defaults.RTProto, + } + + rule.Mark = linux_defaults.RouteMarkDecrypt if err := route.DeleteRule(netlink.FAMILY_V4, rule); err != nil { if !os.IsNotExist(err) { - return fmt.Errorf("delete previous IPv4 encrypt rule failed: %w", err) + return fmt.Errorf("delete previous IPv4 decrypt rule failed: %w", err) } } @@ -758,12 +776,6 @@ func (n *linuxNodeHandler) removeEncryptRules() error { } } - rule.Mark = linux_defaults.RouteMarkEncrypt - if err := route.DeleteRule(netlink.FAMILY_V6, rule); err != nil { - if !os.IsNotExist(err) && !errors.Is(err, unix.EAFNOSUPPORT) { - return fmt.Errorf("delete previous IPv6 encrypt rule failed: %w", err) - } - } return nil } diff --git a/pkg/datapath/linux/ipsec/cell.go b/pkg/datapath/linux/ipsec/cell.go index c3536fa2bc457..d280356e132a1 100644 --- a/pkg/datapath/linux/ipsec/cell.go +++ b/pkg/datapath/linux/ipsec/cell.go @@ -62,7 +62,6 @@ var defaultUserConfig = UserConfig{ EnableIPsec: false, EnableIPsecKeyWatcher: true, EnableIPsecXfrmStateCaching: true, - EnableIPsecEncryptedOverlay: false, UseCiliumInternalIPForIPsec: false, DNSProxyInsecureSkipTransparentModeCheck: false, IPsecKeyFile: "", @@ -73,7 +72,6 @@ type UserConfig struct { EnableIPsec bool EnableIPsecKeyWatcher bool 
EnableIPsecXfrmStateCaching bool - EnableIPsecEncryptedOverlay bool UseCiliumInternalIPForIPsec bool DNSProxyInsecureSkipTransparentModeCheck bool IPsecKeyFile string @@ -85,7 +83,7 @@ func (def UserConfig) Flags(flags *pflag.FlagSet) { flags.Bool(types.EnableIPsecKeyWatcher, def.EnableIPsecKeyWatcher, "Enable watcher for IPsec key. If disabled, a restart of the agent will be necessary on key rotations.") flags.Bool(types.EnableIPSecXfrmStateCaching, def.EnableIPsecXfrmStateCaching, "Enable XfrmState cache for IPSec. Significantly reduces CPU usage in large clusters.") flags.MarkHidden(types.EnableIPSecXfrmStateCaching) - flags.Bool(types.EnableIPSecEncryptedOverlay, def.EnableIPsecEncryptedOverlay, "Enable IPsec encrypted overlay. If enabled tunnel traffic will be encrypted before leaving the host. Requires ipsec and tunnel mode vxlan to be enabled.") + flags.MarkDeprecated(types.EnableIPSecEncryptedOverlay, "Encrypted overlay is the default behavior for IPsec.") flags.Bool(types.UseCiliumInternalIPForIPsec, def.UseCiliumInternalIPForIPsec, "Use the CiliumInternalIPs (vs. NodeInternalIPs) for IPsec encapsulation") flags.MarkHidden(types.UseCiliumInternalIPForIPsec) flags.Bool(types.DNSProxyInsecureSkipTransparentModeCheck, def.DNSProxyInsecureSkipTransparentModeCheck, "Allows DNS proxy transparent mode to be disabled even if encryption is enabled. Enabling this flag and disabling DNS proxy transparent mode will cause proxied DNS traffic to leave the node unencrypted.") @@ -104,10 +102,6 @@ func (c Config) Enabled() bool { return c.EnableIPsec } -func (c Config) EncryptedOverlayEnabled() bool { return c.EnableIPsecEncryptedOverlay } - func (c Config) UseCiliumInternalIP() bool { return c.UseCiliumInternalIPForIPsec } diff --git a/pkg/datapath/linux/ipsec/cell_test.go b/pkg/datapath/linux/ipsec/cell_test.go index 20f72b1830eff..00df198a94369 100644 --- a/pkg/datapath/linux/ipsec/cell_test.go +++ b/pkg/datapath/linux/ipsec/cell_test.go @@ -160,7 +160,6 @@ func TestPrivileged_TestIPSecCell(t *testing.T) { EnableIPsec: ipsecEnabled, EnableIPsecKeyWatcher: true, EnableIPsecXfrmStateCaching: true, - EnableIPsecEncryptedOverlay: false, UseCiliumInternalIPForIPsec: false, DNSProxyInsecureSkipTransparentModeCheck: false, IPsecKeyFile: keyFile, diff --git a/pkg/datapath/linux/ipsec/ipsec_linux.go b/pkg/datapath/linux/ipsec/ipsec_linux.go index 3c1fe43fac8f5..dae7f70a9ef67 100644 --- a/pkg/datapath/linux/ipsec/ipsec_linux.go +++ b/pkg/datapath/linux/ipsec/ipsec_linux.go @@ -69,9 +69,6 @@ const ( // DefaultReqID is the default reqid used for all IPSec rules. DefaultReqID = 1 - - // EncryptedOverlayReqID is the reqid used for encrypting overlay traffic. - EncryptedOverlayReqID = 2 ) type dir string diff --git a/pkg/datapath/linux/linux_defaults/linux_defaults.go b/pkg/datapath/linux/linux_defaults/linux_defaults.go index 262abde3e3560..bdc5086adfd09 100644 --- a/pkg/datapath/linux/linux_defaults/linux_defaults.go +++ b/pkg/datapath/linux/linux_defaults/linux_defaults.go @@ -27,9 +27,10 @@ const ( // table which is between 253-255. See ip-route(8). RouteTableInterfacesOffset = 10 - // MarkProxyToWorld is the default mark to use to indicate that a packet - // from proxy needs to be sent to the world. - MarkProxyToWorld = 0x800 + // MarkSkipTProxy is the default mark to use to indicate that a packet + // should skip tproxy processing. This is needed, e.g., for traffic from transparent + // proxy connections that later passes through the cilium_host / cilium_net pair.
+ MarkSkipTProxy = 0x800 // RouteMarkDecrypt is the default route mark to use to indicate datapath // needs to decrypt a packet. diff --git a/pkg/datapath/linux/linux_defaults/mark.go b/pkg/datapath/linux/linux_defaults/mark.go index d84daa4afd373..2a36e7772fad6 100644 --- a/pkg/datapath/linux/linux_defaults/mark.go +++ b/pkg/datapath/linux/linux_defaults/mark.go @@ -62,11 +62,6 @@ const ( // to identify cilium-managed overlay traffic. MagicMarkOverlay int = 0x0400 - // MagicMarkOverlay is set by the to-overlay program, and can be used - // to identify cilium-managed overlay traffic which was previously IPsec - // encrypted before encapsulation. - MagicMarkOverlayEncrypted int = 0x1400 - // MagicMarkProxyEgressEPID determines that the traffic is sourced from // the proxy which is capturing traffic before it is subject to egress // policy enforcement that must be done after the proxy. The identity diff --git a/pkg/datapath/linux/node.go b/pkg/datapath/linux/node.go index 5f5c6bdc041de..9d9adeda91d17 100644 --- a/pkg/datapath/linux/node.go +++ b/pkg/datapath/linux/node.go @@ -671,11 +671,6 @@ func (n *linuxNodeHandler) replaceHostRules() error { return err } } - rule.Mark = linux_defaults.RouteMarkEncrypt - if err := route.ReplaceRule(rule); err != nil { - n.log.Error("Replace IPv4 route encrypt rule failed", logfields.Error, err) - return err - } } if n.nodeConfig.EnableIPv6 { @@ -684,11 +679,6 @@ func (n *linuxNodeHandler) replaceHostRules() error { n.log.Error("Replace IPv6 route decrypt rule failed", logfields.Error, err) return err } - rule.Mark = linux_defaults.RouteMarkEncrypt - if err := route.ReplaceRuleIPv6(rule); err != nil { - n.log.Error("Replace IPv6 route ecrypt rule failed", logfields.Error, err) - return err - } } return nil @@ -710,6 +700,11 @@ func (n *linuxNodeHandler) NodeConfigurationChanged(newConfig datapath.LocalNode return fmt.Errorf("failed to update or remove node routes: %w", err) } + // Clean up stale IP rules for IPsec. This can be removed in the v1.20 release. + if err := n.removeEncryptRules(); err != nil { + n.log.Warn("Cannot cleanup previous encryption rule state.", logfields.Error, err) + } + if newConfig.EnableIPSec { // For the ENI ipam mode on EKS, this will be the interface that // the router (cilium_host) IP is associated to. 
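Editor's note: the MarkSkipTProxy constant introduced in the linux_defaults hunk above is a single fwmark bit (0x800). The short Go sketch below only illustrates how such a bit can be set and tested on a packet mark; the helper names and the sample mark value are hypothetical and are not part of this change or the Cilium tree.

package main

import "fmt"

// markSkipTProxy mirrors the 0x800 value from the hunk above; the helpers are
// illustrative only.
const markSkipTProxy uint32 = 0x800

// withSkipTProxy sets the skip-tproxy bit on an existing fwmark.
func withSkipTProxy(fwmark uint32) uint32 { return fwmark | markSkipTProxy }

// skipsTProxy reports whether the skip-tproxy bit is set.
func skipsTProxy(fwmark uint32) bool { return fwmark&markSkipTProxy != 0 }

func main() {
	mark := withSkipTProxy(0x0200) // hypothetical base mark plus the skip bit
	fmt.Printf("%#x %v\n", mark, skipsTProxy(mark)) // prints "0xa00 true"
}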
@@ -732,8 +727,8 @@ func (n *linuxNodeHandler) NodeConfigurationChanged(newConfig datapath.LocalNode } n.registerIpsecMetricOnce() } else { - if err := n.removeEncryptRules(); err != nil { - n.log.Warn("Cannot cleanup previous encryption rule state.", logfields.Error, err) + if err := n.removeDecryptRules(); err != nil { + n.log.Warn("Cannot cleanup previous decryption rule state.", logfields.Error, err) } if err := n.ipsecAgent.DeleteXFRM(ipsec.AllReqID); err != nil { return fmt.Errorf("failed to delete xfrm policies on node configuration changed: %w", err) diff --git a/pkg/datapath/linux/route/route_linux.go b/pkg/datapath/linux/route/route_linux.go index 19ffa73199da9..a38ea2d0f5dec 100644 --- a/pkg/datapath/linux/route/route_linux.go +++ b/pkg/datapath/linux/route/route_linux.go @@ -308,7 +308,6 @@ func Delete(route Route) error { Table: route.Table, Type: route.Type, Protocol: netlink.RouteProtocol(route.Proto), - MTU: route.MTU, } // Scope can only be specified for IPv4 diff --git a/pkg/datapath/loader/base.go b/pkg/datapath/loader/base.go index 2263623325004..cdee3716d50c0 100644 --- a/pkg/datapath/loader/base.go +++ b/pkg/datapath/loader/base.go @@ -217,7 +217,7 @@ func (l *loader) reinitializeIPSec(lnc *datapath.LocalNodeConfiguration) error { return nil } - spec, err := bpf.LoadCollectionSpec(l.logger, networkObj) + spec, err := ebpf.LoadCollectionSpec(networkObj) if err != nil { return fmt.Errorf("loading eBPF ELF %s: %w", networkObj, err) } @@ -227,7 +227,7 @@ func (l *loader) reinitializeIPSec(lnc *datapath.LocalNodeConfiguration) error { CollectionOptions: ebpf.CollectionOptions{ Maps: ebpf.MapOptions{PinPath: bpf.TCGlobalsPath()}, }, - Constants: config.NewBPFNetwork(nodeConfig(lnc)), + Constants: config.NewBPFNetwork(config.NodeConfig(lnc)), }) if err != nil { return err @@ -470,7 +470,7 @@ func (l *loader) Reinitialize(ctx context.Context, lnc *datapath.LocalNodeConfig if err := compileWithOptions(ctx, l.logger, "bpf_sock.c", "bpf_sock.o", nil); err != nil { logging.Fatal(l.logger, "failed to compile bpf_sock.c", logfields.Error, err) } - if err := socketlb.Enable(l.logger, l.sysctl, lnc.KPRConfig); err != nil { + if err := socketlb.Enable(l.logger, l.sysctl, lnc); err != nil { return err } } else { diff --git a/pkg/datapath/loader/cache.go b/pkg/datapath/loader/cache.go index 6c1fc3e40fcef..78c10f75987d0 100644 --- a/pkg/datapath/loader/cache.go +++ b/pkg/datapath/loader/cache.go @@ -14,7 +14,6 @@ import ( "github.com/cilium/ebpf" - "github.com/cilium/cilium/pkg/bpf" "github.com/cilium/cilium/pkg/bpf/analyze" "github.com/cilium/cilium/pkg/common" "github.com/cilium/cilium/pkg/datapath/loader/metrics" @@ -226,7 +225,7 @@ func (o *objectCache) fetchOrCompile(ctx context.Context, nodeCfg *datapath.Loca obj.path = path - obj.spec, err = bpf.LoadCollectionSpec(o.logger, path) + obj.spec, err = ebpf.LoadCollectionSpec(path) if err != nil { return nil, "", fmt.Errorf("load eBPF ELF %s: %w", path, err) } diff --git a/pkg/datapath/loader/loader.go b/pkg/datapath/loader/loader.go index 72bfae43796aa..6a5838764447c 100644 --- a/pkg/datapath/loader/loader.go +++ b/pkg/datapath/loader/loader.go @@ -173,7 +173,7 @@ func bpfMasqAddrs(ifName string, cfg *datapath.LocalNodeConfiguration) (masq4, m // netdevRewrites prepares configuration data for attaching bpf_host.c to the // specified externally-facing network device. 
func netdevRewrites(ep datapath.EndpointConfiguration, lnc *datapath.LocalNodeConfiguration, link netlink.Link) (*config.BPFHost, map[string]string) { - cfg := config.NewBPFHost(nodeConfig(lnc)) + cfg := config.NewBPFHost(config.NodeConfig(lnc)) // External devices can be L2-less, in which case it won't have a MAC address // and its ethernet header length is set to 0. @@ -209,6 +209,7 @@ func netdevRewrites(ep datapath.EndpointConfiguration, lnc *datapath.LocalNodeCo cfg.EnableExtendedIPProtocols = option.Config.EnableExtendedIPProtocols cfg.HostEpID = uint16(lnc.HostEndpointID) + cfg.EnableNoServiceEndpointsRoutable = lnc.SvcRouteConfig.EnableNoServiceEndpointsRoutable renames := map[string]string{ // Rename the calls map to include the device's ifindex. @@ -338,7 +339,7 @@ func reloadHostEndpoint(logger *slog.Logger, ep datapath.Endpoint, lnc *datapath // ciliumHostRewrites prepares configuration data for attaching bpf_host.c to // the cilium_host network device. func ciliumHostRewrites(ep datapath.EndpointConfiguration, lnc *datapath.LocalNodeConfiguration) (*config.BPFHost, map[string]string) { - cfg := config.NewBPFHost(nodeConfig(lnc)) + cfg := config.NewBPFHost(config.NodeConfig(lnc)) em := ep.GetNodeMAC() if len(em) != 6 { @@ -410,7 +411,7 @@ func attachCiliumHost(logger *slog.Logger, ep datapath.Endpoint, lnc *datapath.L // ciliumNetRewrites prepares configuration data for attaching bpf_host.c to // the cilium_net network device. func ciliumNetRewrites(ep datapath.EndpointConfiguration, lnc *datapath.LocalNodeConfiguration, link netlink.Link) (*config.BPFHost, map[string]string) { - cfg := config.NewBPFHost(nodeConfig(lnc)) + cfg := config.NewBPFHost(config.NodeConfig(lnc)) cfg.SecurityLabel = ep.GetIdentity().Uint32() @@ -425,6 +426,7 @@ func ciliumNetRewrites(ep datapath.EndpointConfiguration, lnc *datapath.LocalNod } cfg.EnableExtendedIPProtocols = option.Config.EnableExtendedIPProtocols + cfg.EnableNoServiceEndpointsRoutable = lnc.SvcRouteConfig.EnableNoServiceEndpointsRoutable ifindex := link.Attrs().Index cfg.InterfaceIfindex = uint32(ifindex) @@ -566,7 +568,7 @@ func attachNetworkDevices(logger *slog.Logger, ep datapath.Endpoint, lnc *datapa // endpointRewrites prepares configuration data for attaching bpf_lxc.c to the // specified workload endpoint. func endpointRewrites(ep datapath.EndpointConfiguration, lnc *datapath.LocalNodeConfiguration) (*config.BPFLXC, map[string]string) { - cfg := config.NewBPFLXC(nodeConfig(lnc)) + cfg := config.NewBPFLXC(config.NodeConfig(lnc)) if ep.IPv4Address().IsValid() { cfg.EndpointIPv4 = ep.IPv4Address().As4() @@ -593,6 +595,7 @@ func endpointRewrites(ep datapath.EndpointConfiguration, lnc *datapath.LocalNode cfg.PolicyVerdictLogFilter = ep.GetPolicyVerdictLogFilter() cfg.HostEpID = uint16(lnc.HostEndpointID) + cfg.EnableNoServiceEndpointsRoutable = lnc.SvcRouteConfig.EnableNoServiceEndpointsRoutable renames := map[string]string{ // Rename the calls and policy maps to include the endpoint's id. 
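Editor's note: several hunks in this file (and in loader_test.go below) replace bpf.LoadCollectionSpec(logger, path) with ebpf.LoadCollectionSpec(path) and hoist the spec load out of reload loops, so the ELF is parsed once and the resulting spec is reused. A minimal standalone sketch of that pattern follows, assuming only the github.com/cilium/ebpf package; the object path is a placeholder, and actually loading it would require a compiled BPF ELF and sufficient privileges.

package main

import (
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	// Parse the ELF once; the CollectionSpec can then be reused for
	// repeated loads, as the updated reload test does.
	spec, err := ebpf.LoadCollectionSpec("bpf_host.o") // placeholder path
	if err != nil {
		log.Fatalf("loading eBPF ELF: %v", err)
	}

	for range 2 {
		coll, err := ebpf.NewCollection(spec)
		if err != nil {
			log.Fatalf("loading collection: %v", err)
		}
		coll.Close()
	}
}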
@@ -700,15 +703,16 @@ func replaceOverlayDatapath(ctx context.Context, logger *slog.Logger, lnc *datap return fmt.Errorf("compiling overlay program: %w", err) } - spec, err := bpf.LoadCollectionSpec(logger, overlayObj) + spec, err := ebpf.LoadCollectionSpec(overlayObj) if err != nil { return fmt.Errorf("loading eBPF ELF %s: %w", overlayObj, err) } - cfg := config.NewBPFOverlay(nodeConfig(lnc)) + cfg := config.NewBPFOverlay(config.NodeConfig(lnc)) cfg.InterfaceIfindex = uint32(device.Attrs().Index) cfg.EnableExtendedIPProtocols = option.Config.EnableExtendedIPProtocols + cfg.EnableNoServiceEndpointsRoutable = lnc.SvcRouteConfig.EnableNoServiceEndpointsRoutable var obj overlayObjects commit, err := bpf.LoadAndAssign(logger, &obj, spec, &bpf.CollectionOptions{ @@ -747,12 +751,12 @@ func replaceWireguardDatapath(ctx context.Context, logger *slog.Logger, lnc *dat return fmt.Errorf("compiling wireguard program: %w", err) } - spec, err := bpf.LoadCollectionSpec(logger, wireguardObj) + spec, err := ebpf.LoadCollectionSpec(wireguardObj) if err != nil { return fmt.Errorf("loading eBPF ELF %s: %w", wireguardObj, err) } - cfg := config.NewBPFWireguard(nodeConfig(lnc)) + cfg := config.NewBPFWireguard(config.NodeConfig(lnc)) cfg.InterfaceIfindex = uint32(device.Attrs().Index) if !option.Config.EnableHostLegacyRouting { diff --git a/pkg/datapath/loader/loader_test.go b/pkg/datapath/loader/loader_test.go index 00a6073f96338..15b05c3b51a45 100644 --- a/pkg/datapath/loader/loader_test.go +++ b/pkg/datapath/loader/loader_test.go @@ -139,10 +139,10 @@ func TestPrivilegedReload(t *testing.T) { objPath := fmt.Sprintf("%s/%s", dirInfo.Output, endpointObj) tmp := testutils.TempBPFFS(t) - for range 2 { - spec, err := bpf.LoadCollectionSpec(logger, objPath) - require.NoError(t, err) + spec, err := ebpf.LoadCollectionSpec(objPath) + require.NoError(t, err) + for range 2 { coll, commit, err := bpf.LoadCollection(logger, spec, &bpf.CollectionOptions{ CollectionOptions: ebpf.CollectionOptions{Maps: ebpf.MapOptions{PinPath: tmp}}, }) @@ -287,12 +287,12 @@ func BenchmarkPrivilegedReplaceDatapath(b *testing.B) { objPath := fmt.Sprintf("%s/%s", dirInfo.Output, endpointObj) - for b.Loop() { - spec, err := bpf.LoadCollectionSpec(logger, objPath) - if err != nil { - b.Fatal(err) - } + spec, err := ebpf.LoadCollectionSpec(objPath) + if err != nil { + b.Fatal(err) + } + for b.Loop() { coll, commit, err := bpf.LoadCollection(logger, spec, &bpf.CollectionOptions{ CollectionOptions: ebpf.CollectionOptions{Maps: ebpf.MapOptions{PinPath: tmp}}, }) diff --git a/pkg/datapath/loader/verifier_load_test.go b/pkg/datapath/loader/verifier_load_test.go index afe2d91dadc2e..4db19606bda38 100644 --- a/pkg/datapath/loader/verifier_load_test.go +++ b/pkg/datapath/loader/verifier_load_test.go @@ -13,6 +13,7 @@ func lxcLoadPermutations() iter.Seq[*config.BPFLXC] { return func(yield func(*config.BPFLXC) bool) { for permutation := range permute(1) { cfg := config.NewBPFLXC(*config.NewNode()) + cfg.Node.TracingIPOptionType = 1 cfg.SecctxFromIPCache = permutation[0] if !yield(cfg) { return @@ -23,10 +24,17 @@ func lxcLoadPermutations() iter.Seq[*config.BPFLXC] { func hostLoadPermutations() iter.Seq[*config.BPFHost] { return func(yield func(*config.BPFHost) bool) { - for permutation := range permute(2) { + for permutation := range permute(3) { cfg := config.NewBPFHost(*config.NewNode()) + cfg.Node.TracingIPOptionType = 1 cfg.SecctxFromIPCache = permutation[0] cfg.EnableRemoteNodeMasquerade = permutation[1] + if permutation[2] { + cfg.EthHeaderLength = 
0 + } else { + cfg.EthHeaderLength = 14 + } + if !yield(cfg) { return } @@ -37,6 +45,7 @@ func hostLoadPermutations() iter.Seq[*config.BPFHost] { func networkLoadPermutations() iter.Seq[*config.BPFNetwork] { return func(yield func(*config.BPFNetwork) bool) { cfg := config.NewBPFNetwork(*config.NewNode()) + cfg.Node.TracingIPOptionType = 1 if !yield(cfg) { return } @@ -47,6 +56,7 @@ func overlayLoadPermutations() iter.Seq[*config.BPFOverlay] { return func(yield func(*config.BPFOverlay) bool) { for permutation := range permute(1) { cfg := config.NewBPFOverlay(*config.NewNode()) + cfg.Node.TracingIPOptionType = 1 cfg.SecctxFromIPCache = permutation[0] if !yield(cfg) { return @@ -68,6 +78,7 @@ func wireguardLoadPermutations() iter.Seq[*config.BPFWireguard] { return func(yield func(*config.BPFWireguard) bool) { for permutation := range permute(1) { cfg := config.NewBPFWireguard(*config.NewNode()) + cfg.Node.TracingIPOptionType = 1 cfg.SecctxFromIPCache = permutation[0] if !yield(cfg) { return @@ -80,6 +91,7 @@ func xdpLoadPermutations() iter.Seq[*config.BPFXDP] { return func(yield func(*config.BPFXDP) bool) { for permutation := range permute(1) { cfg := config.NewBPFXDP(*config.NewNode()) + cfg.Node.TracingIPOptionType = 1 cfg.SecctxFromIPCache = permutation[0] if !yield(cfg) { return diff --git a/pkg/datapath/loader/verifier_test.go b/pkg/datapath/loader/verifier_test.go index 243a1ac3b2e5d..56a37e5dfe4da 100644 --- a/pkg/datapath/loader/verifier_test.go +++ b/pkg/datapath/loader/verifier_test.go @@ -172,7 +172,7 @@ func compileAndLoad[T any](perm buildPermutation[T], collection, source, output t.Logf("Compiled %s program: %s", collection, objFileName) - spec, err := bpf.LoadCollectionSpec(log, objFileName) + spec, err := ebpf.LoadCollectionSpec(objFileName) if err != nil { t.Fatalf("Failed to load BPF collection spec: %v", err) } diff --git a/pkg/datapath/loader/xdp.go b/pkg/datapath/loader/xdp.go index fc9cd6fe5c383..016f102ea443e 100644 --- a/pkg/datapath/loader/xdp.go +++ b/pkg/datapath/loader/xdp.go @@ -143,12 +143,12 @@ func compileAndLoadXDPProg(ctx context.Context, logger *slog.Logger, lnc *datapa return fmt.Errorf("retrieving device %s: %w", xdpDev, err) } - spec, err := bpf.LoadCollectionSpec(logger, objPath) + spec, err := ebpf.LoadCollectionSpec(objPath) if err != nil { return fmt.Errorf("loading eBPF ELF %s: %w", objPath, err) } - cfg := config.NewBPFXDP(nodeConfig(lnc)) + cfg := config.NewBPFXDP(config.NodeConfig(lnc)) cfg.InterfaceIfindex = uint32(iface.Attrs().Index) cfg.DeviceMTU = uint16(iface.Attrs().MTU) diff --git a/pkg/datapath/orchestrator/localnodeconfig.go b/pkg/datapath/orchestrator/localnodeconfig.go index 5d2d2bf51698c..7db434e11268f 100644 --- a/pkg/datapath/orchestrator/localnodeconfig.go +++ b/pkg/datapath/orchestrator/localnodeconfig.go @@ -23,6 +23,7 @@ import ( "github.com/cilium/cilium/pkg/mtu" "github.com/cilium/cilium/pkg/node" "github.com/cilium/cilium/pkg/option" + "github.com/cilium/cilium/pkg/svcrouteconfig" wgTypes "github.com/cilium/cilium/pkg/wireguard/types" ) @@ -52,6 +53,7 @@ func newLocalNodeConfig( xdpConfig xdp.Config, lbConfig loadbalancer.Config, kprCfg kpr.KPRConfig, + svcCfg svcrouteconfig.RoutesConfig, maglevConfig maglev.Config, mtuTbl statedb.Table[mtu.RouteMTU], wgCfg wgTypes.WireguardConfig, @@ -121,13 +123,13 @@ func newLocalNodeConfig( EnableLocalNodeRoute: config.EnableLocalNodeRoute && config.IPAM != ipamOption.IPAMENI && config.IPAM != ipamOption.IPAMAzure && config.IPAM != ipamOption.IPAMAlibabaCloud, EnableWireguard: 
wgCfg.Enabled(), EnableIPSec: ipsecCfg.Enabled(), - EnableIPSecEncryptedOverlay: ipsecCfg.EncryptedOverlayEnabled(), EncryptNode: config.EncryptNode, IPv4PodSubnets: cidr.NewCIDRSlice(config.IPv4PodSubnets), IPv6PodSubnets: cidr.NewCIDRSlice(config.IPv6PodSubnets), XDPConfig: xdpConfig, LBConfig: lbConfig, KPRConfig: kprCfg, + SvcRouteConfig: svcCfg, MaglevConfig: maglevConfig, }, common.MergeChannels(watchChans...), nil } diff --git a/pkg/datapath/orchestrator/orchestrator.go b/pkg/datapath/orchestrator/orchestrator.go index 594234e8530b1..98f0032e8daef 100644 --- a/pkg/datapath/orchestrator/orchestrator.go +++ b/pkg/datapath/orchestrator/orchestrator.go @@ -35,6 +35,7 @@ import ( "github.com/cilium/cilium/pkg/promise" "github.com/cilium/cilium/pkg/proxy" "github.com/cilium/cilium/pkg/rate" + "github.com/cilium/cilium/pkg/svcrouteconfig" "github.com/cilium/cilium/pkg/time" wgTypes "github.com/cilium/cilium/pkg/wireguard/types" ) @@ -109,6 +110,7 @@ type orchestratorParams struct { XDPConfig xdp.Config LBConfig loadbalancer.Config KPRConfig kpr.KPRConfig + SvcRouteConfig svcrouteconfig.RoutesConfig MaglevConfig maglev.Config WgConfig wgTypes.WireguardConfig IPsecConfig datapath.IPsecConfig @@ -210,6 +212,7 @@ func (o *orchestrator) reconciler(ctx context.Context, health cell.Health) error o.params.XDPConfig, o.params.LBConfig, o.params.KPRConfig, + o.params.SvcRouteConfig, o.params.MaglevConfig, o.params.MTU, o.params.WgConfig, diff --git a/pkg/datapath/tables/node_address_test.go b/pkg/datapath/tables/node_address_test.go index 525139bd29e0e..c888e5252a0aa 100644 --- a/pkg/datapath/tables/node_address_test.go +++ b/pkg/datapath/tables/node_address_test.go @@ -20,6 +20,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + cmtypes "github.com/cilium/cilium/pkg/clustermesh/types" "github.com/cilium/cilium/pkg/defaults" "github.com/cilium/cilium/pkg/hive" "github.com/cilium/cilium/pkg/node" @@ -796,6 +797,7 @@ func fixture(t *testing.T, addressScopeMax int, beforeStart func(*hive.Hive)) (* NodeAddressCell, node.LocalNodeStoreCell, cell.Provide( + func() cmtypes.ClusterInfo { return cmtypes.ClusterInfo{} }, NewDeviceTable, statedb.RWTable[*Device].ToTable, NewRouteTable, @@ -1088,10 +1090,15 @@ func TestNodeAddressFromRoute(t *testing.T) { statedb.RWTable[*Route].ToTable, ), NodeAddressCell, - cell.Provide(func() node.LocalNodeSynchronizer { return testLocalNodeSync{} }), - cell.Provide(func() *option.DaemonConfig { - return &option.DaemonConfig{AddressScopeMax: defaults.AddressScopeMax} - }), + cell.Provide( + func() node.LocalNodeSynchronizer { return testLocalNodeSync{} }, + func() *option.DaemonConfig { + return &option.DaemonConfig{AddressScopeMax: defaults.AddressScopeMax} + }, + func() cmtypes.ClusterInfo { + return cmtypes.ClusterInfo{} + }, + ), // Capture table handles for use in the test. 
cell.Invoke(func(db_ *statedb.DB, d statedb.RWTable[*Device], r statedb.RWTable[*Route], na statedb.Table[NodeAddress]) { diff --git a/pkg/datapath/types/ipsec.go b/pkg/datapath/types/ipsec.go index 818e12a9e56d6..9801691bb04f1 100644 --- a/pkg/datapath/types/ipsec.go +++ b/pkg/datapath/types/ipsec.go @@ -20,7 +20,6 @@ type IPsecAgent interface { type IPsecConfig interface { Enabled() bool - EncryptedOverlayEnabled() bool UseCiliumInternalIP() bool DNSProxyInsecureSkipTransparentModeCheckEnabled() bool } diff --git a/pkg/datapath/types/loader.go b/pkg/datapath/types/loader.go index 9d7c2a6bed057..53c474b455ccb 100644 --- a/pkg/datapath/types/loader.go +++ b/pkg/datapath/types/loader.go @@ -71,6 +71,11 @@ type IptablesManager interface { // See comments for InstallNoTrackRules. RemoveNoTrackRules(ip netip.Addr, port uint16) + + // AddNoTrackHostPorts/RemoveNoTrackHostPorts are called when a pod has a valid "no-track-host-ports" annotation. + // They add or remove iptables NOTRACK rules so connection tracking is skipped for pods using host networking on the requested ports. + AddNoTrackHostPorts(namespace, name string, ports []string) + RemoveNoTrackHostPorts(namespace, name string) } // CompilationLock is a interface over a mutex, it is used by both the loader, daemon diff --git a/pkg/datapath/types/node.go b/pkg/datapath/types/node.go index a4952ac85e35d..375fa8ea228ad 100644 --- a/pkg/datapath/types/node.go +++ b/pkg/datapath/types/node.go @@ -14,6 +14,7 @@ import ( "github.com/cilium/cilium/pkg/loadbalancer" "github.com/cilium/cilium/pkg/maglev" nodeTypes "github.com/cilium/cilium/pkg/node/types" + "github.com/cilium/cilium/pkg/svcrouteconfig" ) type MTUConfiguration interface { @@ -174,9 +175,6 @@ type LocalNodeConfiguration struct { // EnableIPSec enables IPSec routes EnableIPSec bool - // EnableIPSecEncryptedOverlay enables IPSec routes for overlay traffic - EnableIPSecEncryptedOverlay bool - // EncryptNode enables encrypting NodeIP traffic EncryptNode bool @@ -202,6 +200,8 @@ type LocalNodeConfiguration struct { MaglevConfig maglev.Config KPRConfig kpr.KPRConfig + + SvcRouteConfig svcrouteconfig.RoutesConfig } func (cfg *LocalNodeConfiguration) DeviceNames() []string { diff --git a/pkg/datapath/types/zz_generated.deepequal.go b/pkg/datapath/types/zz_generated.deepequal.go index bccebdfee8d0e..121b0bd7c2c9e 100644 --- a/pkg/datapath/types/zz_generated.deepequal.go +++ b/pkg/datapath/types/zz_generated.deepequal.go @@ -247,9 +247,6 @@ func (in *LocalNodeConfiguration) DeepEqual(other *LocalNodeConfiguration) bool if in.EnableIPSec != other.EnableIPSec { return false } - if in.EnableIPSecEncryptedOverlay != other.EnableIPSecEncryptedOverlay { - return false - } if in.EncryptNode != other.EncryptNode { return false } @@ -303,5 +300,9 @@ func (in *LocalNodeConfiguration) DeepEqual(other *LocalNodeConfiguration) bool return false } + if in.SvcRouteConfig != other.SvcRouteConfig { + return false + } + return true } diff --git a/pkg/defaults/defaults.go b/pkg/defaults/defaults.go index 5570664c848ef..94d6a2b14b09d 100644 --- a/pkg/defaults/defaults.go +++ b/pkg/defaults/defaults.go @@ -479,8 +479,9 @@ const ( TunnelPortGeneve uint16 = 6081 // EnableVTEP enables VXLAN Tunnel Endpoint (VTEP) Integration - EnableVTEP = false - MaxVTEPDevices = 8 + EnableVTEP = false + MaxVTEPDevices = 8 + MaxVtepPolicyEntries = 16384 // Enable BGP control plane features.
EnableBGPControlPlane = false @@ -537,6 +538,9 @@ const ( // EnableExtendedIPProtocols controls whether traffic with extended IP protocols is supported in datapath. EnableExtendedIPProtocols = false + + // IPTracingOptionType is the default value for option.IPTracingOptionType + IPTracingOptionType = 0 ) var ( diff --git a/pkg/dial/resolver_test.go b/pkg/dial/resolver_test.go index a911207e9f092..b366e217fc6cd 100644 --- a/pkg/dial/resolver_test.go +++ b/pkg/dial/resolver_test.go @@ -184,6 +184,7 @@ func TestServiceBackendResolver(t *testing.T) { writer.Cell, cell.Provide( + func() cmtypes.ClusterInfo { return cmtypes.ClusterInfo{} }, ServiceBackendResolverFactory("test1"), func() *option.DaemonConfig { return &option.DaemonConfig{} }, diff --git a/pkg/endpoint/api.go b/pkg/endpoint/api.go index 7f2dbee2b6b43..f0156d19fef6f 100644 --- a/pkg/endpoint/api.go +++ b/pkg/endpoint/api.go @@ -9,6 +9,7 @@ package endpoint import ( "context" "fmt" + "io" "log/slog" "maps" "slices" @@ -64,12 +65,12 @@ func (e *Endpoint) GetLabelsModel() (*models.LabelConfiguration, error) { } // NewEndpointFromChangeModel creates a new endpoint from a request -func NewEndpointFromChangeModel(ctx context.Context, logger *slog.Logger, dnsRulesAPI DNSRulesAPI, epBuildQueue EndpointBuildQueue, loader datapath.Loader, orchestrator datapath.Orchestrator, compilationLock datapath.CompilationLock, bandwidthManager datapath.BandwidthManager, ipTablesManager datapath.IptablesManager, identityManager identitymanager.IDManager, monitorAgent monitoragent.Agent, policyMapFactory policymap.Factory, policyRepo policy.PolicyRepository, namedPortsGetter namedPortsGetter, proxy EndpointProxy, allocator cache.IdentityAllocator, ctMapGC ctmap.GCRunner, kvstoreSyncher *ipcache.IPIdentitySynchronizer, model *models.EndpointChangeRequest, wgCfg wgTypes.WireguardConfig, ipsecCfg datapath.IPsecConfig) (*Endpoint, error) { +func NewEndpointFromChangeModel(ctx context.Context, logger *slog.Logger, dnsRulesAPI DNSRulesAPI, epBuildQueue EndpointBuildQueue, loader datapath.Loader, orchestrator datapath.Orchestrator, compilationLock datapath.CompilationLock, bandwidthManager datapath.BandwidthManager, ipTablesManager datapath.IptablesManager, identityManager identitymanager.IDManager, monitorAgent monitoragent.Agent, policyMapFactory policymap.Factory, policyRepo policy.PolicyRepository, namedPortsGetter namedPortsGetter, proxy EndpointProxy, allocator cache.IdentityAllocator, ctMapGC ctmap.GCRunner, kvstoreSyncher *ipcache.IPIdentitySynchronizer, model *models.EndpointChangeRequest, wgCfg wgTypes.WireguardConfig, ipsecCfg datapath.IPsecConfig, policyDebugLog io.Writer) (*Endpoint, error) { if model == nil { return nil, nil } - ep := createEndpoint(logger, dnsRulesAPI, epBuildQueue, loader, orchestrator, compilationLock, bandwidthManager, ipTablesManager, identityManager, monitorAgent, policyMapFactory, policyRepo, namedPortsGetter, proxy, allocator, ctMapGC, kvstoreSyncher, uint16(model.ID), model.InterfaceName, wgCfg, ipsecCfg) + ep := createEndpoint(logger, dnsRulesAPI, epBuildQueue, loader, orchestrator, compilationLock, bandwidthManager, ipTablesManager, identityManager, monitorAgent, policyMapFactory, policyRepo, namedPortsGetter, proxy, allocator, ctMapGC, kvstoreSyncher, uint16(model.ID), model.InterfaceName, wgCfg, ipsecCfg, policyDebugLog) ep.ifIndex = int(model.InterfaceIndex) ep.containerIfName = model.ContainerInterfaceName ep.parentIfIndex = int(model.ParentInterfaceIndex) diff --git a/pkg/endpoint/bpf_test.go 
b/pkg/endpoint/bpf_test.go index 31cd813de8fca..b946aa58b837d 100644 --- a/pkg/endpoint/bpf_test.go +++ b/pkg/endpoint/bpf_test.go @@ -27,7 +27,7 @@ func TestWriteInformationalComments(t *testing.T) { s := setupEndpointSuite(t) model := newTestEndpointModel(100, StateWaitingForIdentity) - e, err := NewEndpointFromChangeModel(t.Context(), logger, nil, &MockEndpointBuildQueue{}, nil, s.orchestrator, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}) + e, err := NewEndpointFromChangeModel(t.Context(), logger, nil, &MockEndpointBuildQueue{}, nil, s.orchestrator, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}, nil) require.NoError(t, err) e.Start(uint16(model.ID)) @@ -47,7 +47,7 @@ func BenchmarkWriteHeaderfile(b *testing.B) { s := setupEndpointSuite(b) model := newTestEndpointModel(100, StateWaitingForIdentity) - e, err := NewEndpointFromChangeModel(b.Context(), logger, nil, &MockEndpointBuildQueue{}, nil, s.orchestrator, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, &fakeTypes.IPsecConfig{}) + e, err := NewEndpointFromChangeModel(b.Context(), logger, nil, &MockEndpointBuildQueue{}, nil, s.orchestrator, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, &fakeTypes.IPsecConfig{}, nil) require.NoError(b, err) e.Start(uint16(model.ID)) diff --git a/pkg/endpoint/creator/creator.go b/pkg/endpoint/creator/creator.go index ea9f121f02750..5ef0af74ca4b1 100644 --- a/pkg/endpoint/creator/creator.go +++ b/pkg/endpoint/creator/creator.go @@ -6,8 +6,13 @@ package creator import ( "context" "log/slog" + "os" + "path/filepath" + "strconv" + "sync" "github.com/cilium/hive/cell" + "github.com/cilium/lumberjack/v2" "github.com/cilium/cilium/api/v1/models" datapath "github.com/cilium/cilium/pkg/datapath/types" @@ -21,6 +26,7 @@ import ( "github.com/cilium/cilium/pkg/maps/policymap" monitoragent "github.com/cilium/cilium/pkg/monitor/agent" "github.com/cilium/cilium/pkg/node" + "github.com/cilium/cilium/pkg/option" "github.com/cilium/cilium/pkg/policy" "github.com/cilium/cilium/pkg/proxy" "github.com/cilium/cilium/pkg/time" @@ -66,6 +72,7 @@ type endpointCreator struct { kvstoreSyncher *ipcache.IPIdentitySynchronizer wgConfig wgTypes.WireguardConfig ipsecConfig datapath.IPsecConfig + policyLogger func() *lumberjack.Logger } var _ EndpointCreator = &endpointCreator{} @@ -117,6 +124,30 @@ func newEndpointCreator(p endpointManagerParams) EndpointCreator { kvstoreSyncher: p.KVStoreSynchronizer, wgConfig: p.WgConfig, ipsecConfig: p.IPSecConfig, + policyLogger: sync.OnceValue(policyDebugLogger), + } +} + +func policyDebugLogger() *lumberjack.Logger { + maxSize := 10 // 10 MB + if ms := os.Getenv("CILIUM_DBG_POLICY_LOG_MAX_SIZE"); ms != "" { + if ms, err := strconv.Atoi(ms); err == nil { + maxSize = ms + } + } + maxBackups := 3 + if mb := 
os.Getenv("CILIUM_DBG_POLICY_LOG_MAX_BACKUPS"); mb != "" { + if mb, err := strconv.Atoi(mb); err == nil { + maxBackups = mb + } + } + return &lumberjack.Logger{ + Filename: filepath.Join(option.Config.StateDir, "endpoint-policy.log"), + MaxSize: maxSize, + MaxBackups: maxBackups, + MaxAge: 28, // days + LocalTime: true, + Compress: true, } } @@ -143,6 +174,7 @@ func (c *endpointCreator) NewEndpointFromChangeModel(ctx context.Context, base * base, c.wgConfig, c.ipsecConfig, + c.policyLogger(), ) } @@ -192,6 +224,7 @@ func (c *endpointCreator) AddIngressEndpoint(ctx context.Context) error { c.kvstoreSyncher, c.wgConfig, c.ipsecConfig, + c.policyLogger(), ) if err != nil { return err @@ -227,6 +260,7 @@ func (c *endpointCreator) AddHostEndpoint(ctx context.Context) error { c.kvstoreSyncher, c.wgConfig, c.ipsecConfig, + c.policyLogger(), ) if err != nil { return err diff --git a/pkg/endpoint/endpoint.go b/pkg/endpoint/endpoint.go index b3d7df90972e0..c5bee63c7588d 100644 --- a/pkg/endpoint/endpoint.go +++ b/pkg/endpoint/endpoint.go @@ -9,6 +9,7 @@ import ( "encoding/json" "errors" "fmt" + "io" "log/slog" "net/netip" "os" @@ -399,6 +400,9 @@ type Endpoint struct { basePolicyLogger atomic.Pointer[slog.Logger] + // Points to a shared policy debug log file. + policyDebugLog io.Writer + // policyLogger is a logrus object with fields set to report an endpoints information. // This must only be accessed with atomic LoadPointer/StorePointer. // 'mutex' must be Lock()ed to synchronize stores. No lock needs to be held @@ -601,45 +605,45 @@ func (e *Endpoint) waitForProxyCompletions(proxyWaitGroup *completion.WaitGroup) return nil } -func createEndpoint(logger *slog.Logger, dnsRulesAPI DNSRulesAPI, epBuildQueue EndpointBuildQueue, loader datapath.Loader, orchestrator datapath.Orchestrator, compilationLock datapath.CompilationLock, bandwidthManager datapath.BandwidthManager, ipTablesManager datapath.IptablesManager, identityManager identitymanager.IDManager, monitorAgent monitoragent.Agent, policyMapFactory policymap.Factory, policyRepo policy.PolicyRepository, namedPortsGetter namedPortsGetter, proxy EndpointProxy, allocator cache.IdentityAllocator, ctMapGC ctmap.GCRunner, kvstoreSyncher *ipcache.IPIdentitySynchronizer, ID uint16, ifName string, wgCfg wgTypes.WireguardConfig, ipsecCfg datapath.IPsecConfig) *Endpoint { +func createEndpoint(logger *slog.Logger, dnsRulesAPI DNSRulesAPI, epBuildQueue EndpointBuildQueue, loader datapath.Loader, orchestrator datapath.Orchestrator, compilationLock datapath.CompilationLock, bandwidthManager datapath.BandwidthManager, ipTablesManager datapath.IptablesManager, identityManager identitymanager.IDManager, monitorAgent monitoragent.Agent, policyMapFactory policymap.Factory, policyRepo policy.PolicyRepository, namedPortsGetter namedPortsGetter, proxy EndpointProxy, allocator cache.IdentityAllocator, ctMapGC ctmap.GCRunner, kvstoreSyncher *ipcache.IPIdentitySynchronizer, ID uint16, ifName string, wgCfg wgTypes.WireguardConfig, ipsecCfg datapath.IPsecConfig, policyDebugLog io.Writer) *Endpoint { ep := &Endpoint{ - dnsRulesAPI: dnsRulesAPI, - epBuildQueue: epBuildQueue, - loader: loader, - orchestrator: orchestrator, - compilationLock: compilationLock, - bandwidthManager: bandwidthManager, - ipTablesManager: ipTablesManager, - identityManager: identityManager, - monitorAgent: monitorAgent, - wgConfig: wgCfg, - ipsecConfig: ipsecCfg, - policyMapFactory: policyMapFactory, - policyRepo: policyRepo, - namedPortsGetter: namedPortsGetter, - ID: ID, - createdAt: time.Now(), - 
proxy: proxy, - ifName: ifName, - labels: labels.NewOpLabels(), - Options: option.NewIntOptions(&EndpointMutableOptionLibrary), - DNSRules: nil, - DNSRulesV2: nil, - DNSHistory: fqdn.NewDNSCacheWithLimit(option.Config.ToFQDNsMinTTL, option.Config.ToFQDNsMaxIPsPerHost), - DNSZombies: fqdn.NewDNSZombieMappings(logger, option.Config.ToFQDNsMaxDeferredConnectionDeletes, option.Config.ToFQDNsMaxIPsPerHost), - state: "", - status: NewEndpointStatus(), - hasBPFProgram: make(chan struct{}), - desiredPolicy: policy.NewEndpointPolicy(logger, policyRepo), - controllers: controller.NewManager(), - regenFailedChan: make(chan struct{}, 1), - allocator: allocator, - logLimiter: logging.NewLimiter(10*time.Second, 3), // 1 log / 10 secs, burst of 3 - noTrackPort: 0, - properties: map[string]any{}, - ctMapGC: ctMapGC, - kvstoreSyncher: kvstoreSyncher, - + dnsRulesAPI: dnsRulesAPI, + epBuildQueue: epBuildQueue, + loader: loader, + orchestrator: orchestrator, + compilationLock: compilationLock, + bandwidthManager: bandwidthManager, + ipTablesManager: ipTablesManager, + identityManager: identityManager, + monitorAgent: monitorAgent, + wgConfig: wgCfg, + ipsecConfig: ipsecCfg, + policyMapFactory: policyMapFactory, + policyRepo: policyRepo, + namedPortsGetter: namedPortsGetter, + ID: ID, + createdAt: time.Now(), + proxy: proxy, + ifName: ifName, + labels: labels.NewOpLabels(), + Options: option.NewIntOptions(&EndpointMutableOptionLibrary), + DNSRules: nil, + DNSRulesV2: nil, + DNSHistory: fqdn.NewDNSCacheWithLimit(option.Config.ToFQDNsMinTTL, option.Config.ToFQDNsMaxIPsPerHost), + DNSZombies: fqdn.NewDNSZombieMappings(logger, option.Config.ToFQDNsMaxDeferredConnectionDeletes, option.Config.ToFQDNsMaxIPsPerHost), + state: "", + status: NewEndpointStatus(), + hasBPFProgram: make(chan struct{}), + desiredPolicy: policy.NewEndpointPolicy(logger, policyRepo), + controllers: controller.NewManager(), + regenFailedChan: make(chan struct{}, 1), + allocator: allocator, + logLimiter: logging.NewLimiter(10*time.Second, 3), // 1 log / 10 secs, burst of 3 + noTrackPort: 0, + properties: map[string]any{}, + ctMapGC: ctMapGC, + kvstoreSyncher: kvstoreSyncher, + policyDebugLog: policyDebugLog, forcePolicyCompute: true, } @@ -682,8 +686,8 @@ func (e *Endpoint) initDNSHistoryTrigger() { } // CreateIngressEndpoint creates the endpoint corresponding to Cilium Ingress. 
-func CreateIngressEndpoint(logger *slog.Logger, dnsRulesAPI DNSRulesAPI, epBuildQueue EndpointBuildQueue, loader datapath.Loader, orchestrator datapath.Orchestrator, compilationLock datapath.CompilationLock, bandwidthManager datapath.BandwidthManager, ipTablesManager datapath.IptablesManager, identityManager identitymanager.IDManager, monitorAgent monitoragent.Agent, policyMapFactory policymap.Factory, policyRepo policy.PolicyRepository, namedPortsGetter namedPortsGetter, proxy EndpointProxy, allocator cache.IdentityAllocator, ctMapGC ctmap.GCRunner, kvstoreSyncher *ipcache.IPIdentitySynchronizer, wgCfg wgTypes.WireguardConfig, ipsecCfg datapath.IPsecConfig) (*Endpoint, error) { - ep := createEndpoint(logger, dnsRulesAPI, epBuildQueue, loader, orchestrator, compilationLock, bandwidthManager, ipTablesManager, identityManager, monitorAgent, policyMapFactory, policyRepo, namedPortsGetter, proxy, allocator, ctMapGC, kvstoreSyncher, 0, "", wgCfg, ipsecCfg) +func CreateIngressEndpoint(logger *slog.Logger, dnsRulesAPI DNSRulesAPI, epBuildQueue EndpointBuildQueue, loader datapath.Loader, orchestrator datapath.Orchestrator, compilationLock datapath.CompilationLock, bandwidthManager datapath.BandwidthManager, ipTablesManager datapath.IptablesManager, identityManager identitymanager.IDManager, monitorAgent monitoragent.Agent, policyMapFactory policymap.Factory, policyRepo policy.PolicyRepository, namedPortsGetter namedPortsGetter, proxy EndpointProxy, allocator cache.IdentityAllocator, ctMapGC ctmap.GCRunner, kvstoreSyncher *ipcache.IPIdentitySynchronizer, wgCfg wgTypes.WireguardConfig, ipsecCfg datapath.IPsecConfig, policyDebugLog io.Writer) (*Endpoint, error) { + ep := createEndpoint(logger, dnsRulesAPI, epBuildQueue, loader, orchestrator, compilationLock, bandwidthManager, ipTablesManager, identityManager, monitorAgent, policyMapFactory, policyRepo, namedPortsGetter, proxy, allocator, ctMapGC, kvstoreSyncher, 0, "", wgCfg, ipsecCfg, policyDebugLog) ep.DatapathConfiguration = NewDatapathConfiguration() ep.isIngress = true @@ -713,13 +717,13 @@ func CreateIngressEndpoint(logger *slog.Logger, dnsRulesAPI DNSRulesAPI, epBuild } // CreateHostEndpoint creates the endpoint corresponding to the host. 
-func CreateHostEndpoint(logger *slog.Logger, dnsRulesAPI DNSRulesAPI, epBuildQueue EndpointBuildQueue, loader datapath.Loader, orchestrator datapath.Orchestrator, compilationLock datapath.CompilationLock, bandwidthManager datapath.BandwidthManager, ipTablesManager datapath.IptablesManager, identityManager identitymanager.IDManager, monitorAgent monitoragent.Agent, policyMapFactory policymap.Factory, policyRepo policy.PolicyRepository, namedPortsGetter namedPortsGetter, proxy EndpointProxy, allocator cache.IdentityAllocator, ctMapGC ctmap.GCRunner, kvstoreSyncher *ipcache.IPIdentitySynchronizer, wgCfg wgTypes.WireguardConfig, ipsecCfg datapath.IPsecConfig) (*Endpoint, error) { +func CreateHostEndpoint(logger *slog.Logger, dnsRulesAPI DNSRulesAPI, epBuildQueue EndpointBuildQueue, loader datapath.Loader, orchestrator datapath.Orchestrator, compilationLock datapath.CompilationLock, bandwidthManager datapath.BandwidthManager, ipTablesManager datapath.IptablesManager, identityManager identitymanager.IDManager, monitorAgent monitoragent.Agent, policyMapFactory policymap.Factory, policyRepo policy.PolicyRepository, namedPortsGetter namedPortsGetter, proxy EndpointProxy, allocator cache.IdentityAllocator, ctMapGC ctmap.GCRunner, kvstoreSyncher *ipcache.IPIdentitySynchronizer, wgCfg wgTypes.WireguardConfig, ipsecCfg datapath.IPsecConfig, policyDebugLog io.Writer) (*Endpoint, error) { iface, err := safenetlink.LinkByName(defaults.HostDevice) if err != nil { return nil, err } - ep := createEndpoint(logger, dnsRulesAPI, epBuildQueue, loader, orchestrator, compilationLock, bandwidthManager, ipTablesManager, identityManager, monitorAgent, policyMapFactory, policyRepo, namedPortsGetter, proxy, allocator, ctMapGC, kvstoreSyncher, 0, defaults.HostDevice, wgCfg, ipsecCfg) + ep := createEndpoint(logger, dnsRulesAPI, epBuildQueue, loader, orchestrator, compilationLock, bandwidthManager, ipTablesManager, identityManager, monitorAgent, policyMapFactory, policyRepo, namedPortsGetter, proxy, allocator, ctMapGC, kvstoreSyncher, 0, defaults.HostDevice, wgCfg, ipsecCfg, policyDebugLog) ep.isHost = true ep.mac = mac.MAC(iface.Attrs().HardwareAddr) ep.nodeMAC = mac.MAC(iface.Attrs().HardwareAddr) @@ -905,11 +909,6 @@ func (e *Endpoint) SetDefaultOpts(opts *option.IntOptions) { e.Options.SetValidated(k, opts.GetValue(k)) } } - // Always set DebugPolicy if Debug is configured, possibly overriding this setting in - // 'opts' - if option.Config.Debug { - e.Options.SetValidated(option.DebugPolicy, option.OptionEnabled) - } e.UpdateLogger(nil) } diff --git a/pkg/endpoint/endpoint_status_test.go b/pkg/endpoint/endpoint_status_test.go index 878f20365e76a..bee6de1cc50dc 100644 --- a/pkg/endpoint/endpoint_status_test.go +++ b/pkg/endpoint/endpoint_status_test.go @@ -39,7 +39,7 @@ func TestGetCiliumEndpointStatus(t *testing.T) { "k8s:name=probe", }, State: models.EndpointStateWaitingDashForDashIdentity.Pointer(), - }, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}) + }, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}, nil) require.NoError(t, err) status := e.GetCiliumEndpointStatus() @@ -83,7 +83,7 @@ func TestGetCiliumEndpointStatusWithServiceAccount(t *testing.T) { "k8s:name=probe", }, State: models.EndpointStateWaitingDashForDashIdentity.Pointer(), - }, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}) + }, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}, nil) require.NoError(t, err) // Create a mock pod with ServiceAccount diff --git a/pkg/endpoint/endpoint_test.go b/pkg/endpoint/endpoint_test.go index 
f068fec8def26..8a0a4000e9dc4 100644 --- a/pkg/endpoint/endpoint_test.go +++ b/pkg/endpoint/endpoint_test.go @@ -195,7 +195,7 @@ func TestEndpointDatapathOptions(t *testing.T) { DatapathConfiguration: &models.EndpointDatapathConfiguration{ DisableSipVerification: true, }, - }, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}) + }, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}, nil) require.NoError(t, err) require.Equal(t, option.OptionDisabled, e.Options.GetValue(option.SourceIPVerification)) } @@ -205,7 +205,7 @@ func TestEndpointUpdateLabels(t *testing.T) { logger := hivetest.Logger(t) model := newTestEndpointModel(100, StateWaitingForIdentity) - e, err := NewEndpointFromChangeModel(t.Context(), hivetest.Logger(t), nil, &MockEndpointBuildQueue{}, nil, s.orchestrator, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}) + e, err := NewEndpointFromChangeModel(t.Context(), hivetest.Logger(t), nil, &MockEndpointBuildQueue{}, nil, s.orchestrator, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}, nil) require.NoError(t, err) e.Start(uint16(model.ID)) @@ -252,7 +252,7 @@ func TestEndpointState(t *testing.T) { logger := hivetest.Logger(t) model := newTestEndpointModel(100, StateWaitingForIdentity) - e, err := NewEndpointFromChangeModel(t.Context(), hivetest.Logger(t), nil, &MockEndpointBuildQueue{}, nil, s.orchestrator, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}) + e, err := NewEndpointFromChangeModel(t.Context(), hivetest.Logger(t), nil, &MockEndpointBuildQueue{}, nil, s.orchestrator, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}, nil) require.NoError(t, err) e.Start(uint16(model.ID)) t.Cleanup(e.Stop) @@ -640,7 +640,7 @@ func TestEndpointEventQueueDeadlockUponStop(t *testing.T) { }() model := newTestEndpointModel(12345, StateReady) - ep, err := NewEndpointFromChangeModel(t.Context(), hivetest.Logger(t), nil, &MockEndpointBuildQueue{}, nil, s.orchestrator, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}) + ep, err := NewEndpointFromChangeModel(t.Context(), hivetest.Logger(t), nil, &MockEndpointBuildQueue{}, nil, s.orchestrator, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}, nil) require.NoError(t, err) ep.Start(uint16(model.ID)) @@ -722,7 +722,7 @@ func BenchmarkEndpointGetModel(b *testing.B) { logger := hivetest.Logger(b) model := 
newTestEndpointModel(100, StateWaitingForIdentity) - e, err := NewEndpointFromChangeModel(b.Context(), logger, nil, &MockEndpointBuildQueue{}, nil, s.orchestrator, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}) + e, err := NewEndpointFromChangeModel(b.Context(), logger, nil, &MockEndpointBuildQueue{}, nil, s.orchestrator, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}, nil) require.NoError(b, err) e.Start(uint16(model.ID)) @@ -801,7 +801,7 @@ func TestMetadataResolver(t *testing.T) { t.Run(fmt.Sprintf("%s (restored=%t)", tt.name, restored), func(t *testing.T) { model := newTestEndpointModel(100, StateWaitingForIdentity) kvstoreSync := ipcache.NewIPIdentitySynchronizer(logger, kvstore.SetupDummy(t, kvstore.DisabledBackendName)) - ep, err := NewEndpointFromChangeModel(t.Context(), logger, nil, &MockEndpointBuildQueue{}, nil, s.orchestrator, nil, &fakeTypes.BandwidthManager{}, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), kvstoreSync, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}) + ep, err := NewEndpointFromChangeModel(t.Context(), logger, nil, &MockEndpointBuildQueue{}, nil, s.orchestrator, nil, &fakeTypes.BandwidthManager{}, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), kvstoreSync, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}, nil) require.NoError(t, err) ep.K8sNamespace, ep.K8sPodName, ep.K8sUID = "bar", "foo", "uid" diff --git a/pkg/endpoint/log.go b/pkg/endpoint/log.go index cc71bfcc1ded7..1d72473824082 100644 --- a/pkg/endpoint/log.go +++ b/pkg/endpoint/log.go @@ -6,11 +6,6 @@ package endpoint import ( "context" "log/slog" - "os" - "path/filepath" - "strconv" - - "github.com/cilium/lumberjack/v2" "github.com/cilium/cilium/pkg/logging" "github.com/cilium/cilium/pkg/logging/logfields" @@ -143,27 +138,7 @@ func (e *Endpoint) updatePolicyLogger(fields map[string]any) { policyLogger := e.policyLogger.Load() // e.Options check needed for unit testing. 
if policyLogger == nil && e.Options != nil && e.Options.IsEnabled(option.DebugPolicy) { - maxSize := 10 // 10 MB - if ms := os.Getenv("CILIUM_DBG_POLICY_LOG_MAX_SIZE"); ms != "" { - if ms, err := strconv.Atoi(ms); err == nil { - maxSize = ms - } - } - maxBackups := 3 - if mb := os.Getenv("CILIUM_DBG_POLICY_LOG_MAX_BACKUPS"); mb != "" { - if mb, err := strconv.Atoi(mb); err == nil { - maxBackups = mb - } - } - lumberjackLogger := &lumberjack.Logger{ - Filename: filepath.Join(option.Config.StateDir, "endpoint-policy.log"), - MaxSize: maxSize, - MaxBackups: maxBackups, - MaxAge: 28, // days - LocalTime: true, - Compress: true, - } - baseLogger := slog.New(slog.NewTextHandler(lumberjackLogger, &slog.HandlerOptions{ + baseLogger := slog.New(slog.NewTextHandler(e.policyDebugLog, &slog.HandlerOptions{ Level: slog.LevelDebug, ReplaceAttr: logging.ReplaceAttrFn, })) diff --git a/pkg/endpoint/log_test.go b/pkg/endpoint/log_test.go index df882b0d54df5..82098379338c4 100644 --- a/pkg/endpoint/log_test.go +++ b/pkg/endpoint/log_test.go @@ -26,11 +26,14 @@ import ( func TestPolicyLog(t *testing.T) { setupEndpointSuite(t) logger := hivetest.Logger(t) + logPath := filepath.Join(option.Config.StateDir, "endpoint-policy.log") + f, err := os.Create(logPath) + require.NoError(t, err) do := &DummyOwner{repo: policy.NewPolicyRepository(logger, nil, nil, nil, nil, testpolicy.NewPolicyMetricsNoop())} model := newTestEndpointModel(12345, StateReady) - ep, err := NewEndpointFromChangeModel(t.Context(), logger, nil, &MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, do.repo, testipcache.NewMockIPCache(), nil, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}) + ep, err := NewEndpointFromChangeModel(t.Context(), logger, nil, &MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, do.repo, testipcache.NewMockIPCache(), nil, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}, f) require.NoError(t, err) ep.Start(uint16(model.ID)) @@ -48,7 +51,7 @@ func TestPolicyLog(t *testing.T) { require.NotNil(t, policyLogger) defer func() { // remote created log file when we are done. 
- err := os.Remove(filepath.Join(option.Config.StateDir, "endpoint-policy.log")) + err := os.Remove(logPath) require.NoError(t, err) }() @@ -67,7 +70,7 @@ func TestPolicyLog(t *testing.T) { require.Nil(t, policyLogger) // Verify file exists and contains the logged message - buf, err := os.ReadFile(filepath.Join(option.Config.StateDir, "endpoint-policy.log")) + buf, err := os.ReadFile(logPath) require.NoError(t, err) require.True(t, bytes.Contains(buf, []byte("testing policy logging"))) require.True(t, bytes.Contains(buf, []byte("testing PolicyDebug"))) diff --git a/pkg/endpoint/redirect_test.go b/pkg/endpoint/redirect_test.go index 787a82d810588..9b4ba72e47c42 100644 --- a/pkg/endpoint/redirect_test.go +++ b/pkg/endpoint/redirect_test.go @@ -169,7 +169,7 @@ func (s *RedirectSuite) NewTestEndpoint(t *testing.T) *Endpoint { logger := hivetest.Logger(t) model := newTestEndpointModel(12345, StateRegenerating) kvstoreSync := ipcache.NewIPIdentitySynchronizer(logger, kvstore.SetupDummy(t, kvstore.DisabledBackendName)) - ep, err := NewEndpointFromChangeModel(t.Context(), logger, nil, &MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.do.repo, testipcache.NewMockIPCache(), s.rsp, s.mgr, ctmap.NewFakeGCRunner(), kvstoreSync, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}) + ep, err := NewEndpointFromChangeModel(t.Context(), logger, nil, &MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.do.repo, testipcache.NewMockIPCache(), s.rsp, s.mgr, ctmap.NewFakeGCRunner(), kvstoreSync, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}, nil) require.NoError(t, err) ep.Start(uint16(model.ID)) diff --git a/pkg/endpoint/restore_test.go b/pkg/endpoint/restore_test.go index a10f956c0d030..31362b19d5edb 100644 --- a/pkg/endpoint/restore_test.go +++ b/pkg/endpoint/restore_test.go @@ -66,7 +66,7 @@ func (s *EndpointSuite) endpointCreator(t testing.TB, id uint16, secID identity. identity.Sanitize() model := newTestEndpointModel(int(id), StateReady) - ep, err := NewEndpointFromChangeModel(context.TODO(), hivetest.Logger(t), nil, &MockEndpointBuildQueue{}, nil, s.orchestrator, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}) + ep, err := NewEndpointFromChangeModel(context.TODO(), hivetest.Logger(t), nil, &MockEndpointBuildQueue{}, nil, s.orchestrator, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}, nil) require.NoError(t, err) ep.Start(uint16(model.ID)) diff --git a/pkg/endpointmanager/endpointsynchronizer.go b/pkg/endpointmanager/endpointsynchronizer.go index da513aefd7400..9a72b7c819eb0 100644 --- a/pkg/endpointmanager/endpointsynchronizer.go +++ b/pkg/endpointmanager/endpointsynchronizer.go @@ -204,6 +204,11 @@ func (epSync *EndpointSynchronizer) RunK8sCiliumEndpointSync(e *endpoint.Endpoin // continue the execution so we update the endpoint // status immediately upon endpoint creation + + case errors.Is(err, context.Canceled): + // Do not log a warning in case of errors due to the + // endpoint being terminated. 
+ return nil default: scopedLog.Warn("Error getting CEP", logfields.Error, err) return err diff --git a/pkg/endpointmanager/gc_test.go b/pkg/endpointmanager/gc_test.go index d50abe5d2a3cd..1b1b7199baa76 100644 --- a/pkg/endpointmanager/gc_test.go +++ b/pkg/endpointmanager/gc_test.go @@ -47,7 +47,7 @@ func TestMarkAndSweep(t *testing.T) { allEndpointIDs := append(healthyEndpointIDs, endpointIDToDelete) for _, id := range allEndpointIDs { model := newTestEndpointModel(int(id), endpoint.StateReady) - ep, err := endpoint.NewEndpointFromChangeModel(t.Context(), logger, nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}) + ep, err := endpoint.NewEndpointFromChangeModel(t.Context(), logger, nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}, nil) require.NoError(t, err) ep.Start(uint16(model.ID)) diff --git a/pkg/endpointmanager/manager_test.go b/pkg/endpointmanager/manager_test.go index 406f78ed0d4d7..9c651e464f313 100644 --- a/pkg/endpointmanager/manager_test.go +++ b/pkg/endpointmanager/manager_test.go @@ -391,7 +391,7 @@ func TestLookup(t *testing.T) { logger := hivetest.Logger(t) mgr := New(logger, nil, &dummyEpSyncher{}, nil, nil, nil, defaultEndpointManagerConfig) if tt.cm != nil { - ep, err = endpoint.NewEndpointFromChangeModel(context.Background(), logger, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, tt.cm, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}) + ep, err = endpoint.NewEndpointFromChangeModel(context.Background(), logger, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, tt.cm, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}, nil) require.NoErrorf(t, err, "Test Name: %s", tt.name) err = mgr.expose(ep) require.NoErrorf(t, err, "Test Name: %s", tt.name) @@ -416,7 +416,7 @@ func TestLookupCiliumID(t *testing.T) { model := newTestEndpointModel(2, endpoint.StateReady) mgr := New(logger, nil, &dummyEpSyncher{}, nil, nil, nil, defaultEndpointManagerConfig) - ep, err := endpoint.NewEndpointFromChangeModel(t.Context(), logger, nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}) + ep, err := endpoint.NewEndpointFromChangeModel(t.Context(), logger, nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}, nil) require.NoError(t, err) 
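The endpointsynchronizer hunk above adds a case that treats a canceled context as a normal shutdown path rather than something worth a warning. A small stdlib sketch of that errors.Is pattern; getCEP and syncOnce are hypothetical stand-ins, not names from the change:

package main

import (
	"context"
	"errors"
	"fmt"
)

// getCEP stands in for the CEP lookup; it fails once the context is canceled.
func getCEP(ctx context.Context) error {
	return ctx.Err()
}

func syncOnce(ctx context.Context) error {
	err := getCEP(ctx)
	switch {
	case err == nil:
		return nil
	case errors.Is(err, context.Canceled):
		// Endpoint is being terminated; stop quietly instead of warning.
		return nil
	default:
		fmt.Println("warn: error getting CEP:", err)
		return err
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	fmt.Println(syncOnce(ctx)) // <nil>: cancellation is not treated as a failure
}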
ep.Start(uint16(model.ID)) @@ -494,7 +494,7 @@ func TestLookupCNIAttachmentID(t *testing.T) { ep, err := endpoint.NewEndpointFromChangeModel(context.Background(), logger, nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, nil, nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, &apiv1.EndpointChangeRequest{ ContainerID: "foo", ContainerInterfaceName: "bar", - }, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}) + }, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}, nil) require.NoError(t, err) require.NoError(t, mgr.expose(ep)) @@ -514,7 +514,7 @@ func TestLookupIPv4(t *testing.T) { mgr := New(logger, nil, &dummyEpSyncher{}, nil, nil, nil, defaultEndpointManagerConfig) model := newTestEndpointModel(4, endpoint.StateReady) - ep, err := endpoint.NewEndpointFromChangeModel(t.Context(), logger, nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}) + ep, err := endpoint.NewEndpointFromChangeModel(t.Context(), logger, nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}, nil) require.NoError(t, err) ep.Start(uint16(model.ID)) @@ -667,7 +667,7 @@ func TestLookupCEPName(t *testing.T) { }, } for _, tt := range tests { - ep, err := endpoint.NewEndpointFromChangeModel(context.Background(), logger, nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, nil, nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, &tt.cm, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}) + ep, err := endpoint.NewEndpointFromChangeModel(context.Background(), logger, nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, nil, nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, &tt.cm, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}, nil) require.NoErrorf(t, err, "Test Name: %s", tt.name) tt.preTestRun(ep) args := tt.setupArgs() @@ -710,7 +710,7 @@ func TestUpdateReferences(t *testing.T) { } for _, tt := range tests { var err error - ep, err = endpoint.NewEndpointFromChangeModel(context.Background(), hivetest.Logger(t), nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, nil, nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, &tt.cm, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}) + ep, err = endpoint.NewEndpointFromChangeModel(context.Background(), hivetest.Logger(t), nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, nil, nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, &tt.cm, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}, nil) require.NoErrorf(t, err, "Test Name: %s", tt.name) logger := hivetest.Logger(t) mgr := 
New(logger, nil, &dummyEpSyncher{}, nil, nil, nil, defaultEndpointManagerConfig) @@ -746,7 +746,7 @@ func TestRemove(t *testing.T) { logger := hivetest.Logger(t) mgr := New(logger, nil, &dummyEpSyncher{}, nil, nil, nil, defaultEndpointManagerConfig) model := newTestEndpointModel(7, endpoint.StateReady) - ep, err := endpoint.NewEndpointFromChangeModel(t.Context(), logger, nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}) + ep, err := endpoint.NewEndpointFromChangeModel(t.Context(), logger, nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}, nil) require.NoError(t, err) ep.Start(uint16(model.ID)) @@ -792,7 +792,7 @@ func TestWaitForEndpointsAtPolicyRev(t *testing.T) { logger := hivetest.Logger(t) mgr := New(logger, nil, &dummyEpSyncher{}, nil, nil, nil, defaultEndpointManagerConfig) model := newTestEndpointModel(1, endpoint.StateReady) - ep, err := endpoint.NewEndpointFromChangeModel(t.Context(), logger, nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}) + ep, err := endpoint.NewEndpointFromChangeModel(t.Context(), logger, nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}, nil) require.NoError(t, err) ep.Start(uint16(model.ID)) @@ -835,7 +835,7 @@ func TestWaitForEndpointsAtPolicyRev(t *testing.T) { postTestRun: func() { mgr.WaitEndpointRemoved(ep) model := newTestEndpointModel(1, endpoint.StateReady) - ep, err = endpoint.NewEndpointFromChangeModel(t.Context(), logger, nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}) + ep, err = endpoint.NewEndpointFromChangeModel(t.Context(), logger, nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}, nil) require.NoError(t, err) ep.Start(uint16(model.ID)) @@ -866,7 +866,7 @@ func TestWaitForEndpointsAtPolicyRev(t *testing.T) { postTestRun: func() { mgr.WaitEndpointRemoved(ep) model := newTestEndpointModel(1, endpoint.StateReady) - ep, err = endpoint.NewEndpointFromChangeModel(t.Context(), logger, nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, 
identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}) + ep, err = endpoint.NewEndpointFromChangeModel(t.Context(), logger, nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}, nil) require.NoError(t, err) ep.Start(uint16(model.ID)) @@ -897,7 +897,7 @@ func TestWaitForEndpointsAtPolicyRev(t *testing.T) { postTestRun: func() { mgr.WaitEndpointRemoved(ep) model := newTestEndpointModel(1, endpoint.StateReady) - ep, err = endpoint.NewEndpointFromChangeModel(t.Context(), logger, nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}) + ep, err = endpoint.NewEndpointFromChangeModel(t.Context(), logger, nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}, nil) require.NoError(t, err) ep.Start(uint16(model.ID)) @@ -937,7 +937,7 @@ func TestMissingNodeLabelsUpdate(t *testing.T) { // Create host endpoint and expose it in the endpoint manager. 
model := newTestEndpointModel(1, endpoint.StateReady) kvstoreSync := ipcache.NewIPIdentitySynchronizer(logger, kvstore.SetupDummy(t, kvstore.DisabledBackendName)) - ep, err := endpoint.NewEndpointFromChangeModel(t.Context(), logger, nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), kvstoreSync, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}) + ep, err := endpoint.NewEndpointFromChangeModel(t.Context(), logger, nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), kvstoreSync, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}, nil) require.NoError(t, err) ep.Start(uint16(model.ID)) @@ -989,7 +989,7 @@ func TestUpdateHostEndpointLabels(t *testing.T) { name: "Add labels", preTestRun: func() { model := newTestEndpointModel(1, endpoint.StateReady) - ep, err := endpoint.NewEndpointFromChangeModel(t.Context(), logger, nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), kvstoreSync, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}) + ep, err := endpoint.NewEndpointFromChangeModel(t.Context(), logger, nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), kvstoreSync, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}, nil) require.NoError(t, err) ep.Start(uint16(model.ID)) @@ -1020,7 +1020,7 @@ func TestUpdateHostEndpointLabels(t *testing.T) { preTestRun: func() { model := newTestEndpointModel(1, endpoint.StateReady) model.Labels = apiv1.Labels([]string{"k8s:k1=v1"}) - ep, err := endpoint.NewEndpointFromChangeModel(t.Context(), logger, nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), kvstoreSync, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}) + ep, err := endpoint.NewEndpointFromChangeModel(t.Context(), logger, nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), kvstoreSync, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}, nil) require.NoError(t, err) ep.Start(uint16(model.ID)) @@ -1053,7 +1053,7 @@ func TestUpdateHostEndpointLabels(t *testing.T) { model := newTestEndpointModel(1, endpoint.StateReady) model.Labels = apiv1.Labels([]string{"k8s:k1=v1"}) kvstoreSync := ipcache.NewIPIdentitySynchronizer(logger, kvstore.SetupDummy(t, kvstore.DisabledBackendName)) - ep, err := endpoint.NewEndpointFromChangeModel(t.Context(), logger, nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, 
testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), kvstoreSync, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}) + ep, err := endpoint.NewEndpointFromChangeModel(t.Context(), logger, nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), kvstoreSync, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}, nil) ep.SetIsHost(true) require.NoError(t, err) diff --git a/pkg/envoy/xds_server.go b/pkg/envoy/xds_server.go index 974fceaba6fac..e857b09502f60 100644 --- a/pkg/envoy/xds_server.go +++ b/pkg/envoy/xds_server.go @@ -395,7 +395,10 @@ func (s *xdsServer) getHttpFilterChainProto(clusterName string, tls bool, isIngr } hcmConfig := &envoy_config_http.HttpConnectionManager{ - StatPrefix: "proxy", + StatPrefix: "proxy", + UpgradeConfigs: []*envoy_config_http.HttpConnectionManager_UpgradeConfig{ + {UpgradeType: "websocket"}, + }, UseRemoteAddress: &wrapperspb.BoolValue{Value: true}, SkipXffAppend: true, XffNumTrustedHops: xffNumTrustedHops, diff --git a/pkg/fqdn/dnsproxy/proxy_test.go b/pkg/fqdn/dnsproxy/proxy_test.go index dceacedffecc3..01430e4e138e0 100644 --- a/pkg/fqdn/dnsproxy/proxy_test.go +++ b/pkg/fqdn/dnsproxy/proxy_test.go @@ -170,7 +170,7 @@ func (s *DNSProxyTestSuite) LookupRegisteredEndpoint(ip netip.Addr) (*endpoint.E return nil, false, fmt.Errorf("No EPs available when restoring") } model := newTestEndpointModel(int(epID1), endpoint.StateReady) - ep, err := endpoint.NewEndpointFromChangeModel(context.TODO(), s.logger, nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(s.logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}) + ep, err := endpoint.NewEndpointFromChangeModel(context.TODO(), s.logger, nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(s.logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}, nil) ep.Start(uint16(model.ID)) defer ep.Stop() return ep, false, err @@ -878,7 +878,7 @@ func TestPrivilegedFullPathDependence(t *testing.T) { // Restore rules model := newTestEndpointModel(int(epID1), endpoint.StateReady) - ep1, err := endpoint.NewEndpointFromChangeModel(t.Context(), hivetest.Logger(t), nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}) + ep1, err := endpoint.NewEndpointFromChangeModel(t.Context(), hivetest.Logger(t), nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}, nil) require.NoError(t, err) ep1.Start(uint16(model.ID)) @@ -930,7 
+930,7 @@ func TestPrivilegedFullPathDependence(t *testing.T) { // Restore rules for epID3 modelEP3 := newTestEndpointModel(int(epID3), endpoint.StateReady) - ep3, err := endpoint.NewEndpointFromChangeModel(t.Context(), hivetest.Logger(t), nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, modelEP3, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}) + ep3, err := endpoint.NewEndpointFromChangeModel(t.Context(), hivetest.Logger(t), nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, modelEP3, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}, nil) require.NoError(t, err) ep3.Start(uint16(modelEP3.ID)) @@ -1141,7 +1141,7 @@ func TestPrivilegedRestoredEndpoint(t *testing.T) { // restore rules, set the mock to restoring state s.restoring = true model := newTestEndpointModel(int(epID1), endpoint.StateReady) - ep1, err := endpoint.NewEndpointFromChangeModel(t.Context(), hivetest.Logger(t), nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}) + ep1, err := endpoint.NewEndpointFromChangeModel(t.Context(), hivetest.Logger(t), nil, &endpoint.MockEndpointBuildQueue{}, nil, nil, nil, nil, nil, identitymanager.NewIDManager(logger), nil, nil, s.repo, testipcache.NewMockIPCache(), &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), ctmap.NewFakeGCRunner(), nil, model, fakeTypes.WireguardConfig{}, fakeTypes.IPsecConfig{}, nil) require.NoError(t, err) ep1.Start(uint16(model.ID)) diff --git a/pkg/health/health_connectivity_node.go b/pkg/health/health_connectivity_node.go index 2d6bf46b600e6..3f6972fd15944 100644 --- a/pkg/health/health_connectivity_node.go +++ b/pkg/health/health_connectivity_node.go @@ -56,7 +56,7 @@ func (h *ciliumHealthManager) launchCiliumNodeHealth(spec *healthApi.Spec, initi HealthAPISpec: spec, } - ch.server, err = server.NewServer(h.logger, config) + ch.server, err = server.NewServer(h.logger, config, h.healthConfig.IsActiveHealthCheckingEnabled()) if err != nil { return nil, fmt.Errorf("failed to instantiate cilium-health server: %w", err) } diff --git a/pkg/health/health_manager.go b/pkg/health/health_manager.go index 9ba7cf32dbd67..7e961f02ebd0c 100644 --- a/pkg/health/health_manager.go +++ b/pkg/health/health_manager.go @@ -11,6 +11,8 @@ import ( "github.com/cilium/hive/cell" + "github.com/cilium/cilium/pkg/healthconfig" + healthApi "github.com/cilium/cilium/api/v1/health/server" "github.com/cilium/cilium/api/v1/models" "github.com/cilium/cilium/pkg/controller" @@ -62,6 +64,8 @@ type ciliumHealthManager struct { ctrlMgr *controller.Manager ciliumHealth *CiliumHealth + + healthConfig healthconfig.CiliumHealthConfig } type ciliumHealthParams struct { @@ -77,6 +81,7 @@ type ciliumHealthParams struct { EndpointCreator endpointcreator.EndpointCreator EndpointManager endpointmanager.EndpointManager K8sClientSet k8sClient.Clientset + Config healthconfig.CiliumHealthConfig } func 
newCiliumHealthManager(params ciliumHealthParams) CiliumHealthManager { @@ -90,6 +95,7 @@ func newCiliumHealthManager(params ciliumHealthParams) CiliumHealthManager { endpointCreator: params.EndpointCreator, endpointManager: params.EndpointManager, k8sClientSet: params.K8sClientSet, + healthConfig: params.Config, } return h @@ -106,7 +112,7 @@ func (h *ciliumHealthManager) Init(ctx context.Context, routingInfo *linuxroutin h.ciliumHealth = ch // If endpoint health checking is disabled, the virtual endpoint does not need to be launched - if !option.Config.EnableEndpointHealthChecking { + if !h.healthConfig.IsEndpointHealthCheckingEnabled() { return nil } diff --git a/pkg/health/server/server.go b/pkg/health/server/server.go index ed909c9cc22b4..444904eb52907 100644 --- a/pkg/health/server/server.go +++ b/pkg/health/server/server.go @@ -69,6 +69,9 @@ type Server struct { localStatus *healthModels.SelfStatus nodesSeen map[string]struct{} + + // Enable/disable periodic health and connectivity checks + enableActiveChecks bool } // DumpUptime returns the time that this server has been running. @@ -386,9 +389,11 @@ func (s *Server) Serve() (err error) { errors <- s.httpPathServer.Serve() }() - go func() { - errors <- s.runActiveServices() - }() + if s.enableActiveChecks { + go func() { + errors <- s.runActiveServices() + }() + } // Block for the first error, then return. err = <-errors @@ -424,13 +429,14 @@ func (s *Server) newServer(logger *slog.Logger, spec *healthApi.Spec) *healthApi } // NewServer creates a server to handle health requests. -func NewServer(logger *slog.Logger, config Config) (*Server, error) { +func NewServer(logger *slog.Logger, config Config, enableActiveChecks bool) (*Server, error) { server := &Server{ - logger: logger, - startTime: time.Now(), - Config: config, - connectivity: &healthReport{}, - nodesSeen: make(map[string]struct{}), + logger: logger, + startTime: time.Now(), + Config: config, + connectivity: &healthReport{}, + nodesSeen: make(map[string]struct{}), + enableActiveChecks: enableActiveChecks, } cl, err := ciliumPkg.NewClient(config.CiliumURI) diff --git a/pkg/healthconfig/cell.go b/pkg/healthconfig/cell.go new file mode 100644 index 0000000000000..d2f31ebf78d81 --- /dev/null +++ b/pkg/healthconfig/cell.go @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package healthconfig + +import ( + "github.com/cilium/hive/cell" + "github.com/spf13/pflag" +) + +const ( + EnableHealthCheckingName = "enable-health-checking" + EnableEndpointHealthCheckingName = "enable-endpoint-health-checking" +) + +// Cell provides the Cilium health config. 
+var Cell = cell.Module( + "cilium-health-config", + "Cilium health config", + cell.Config[CiliumHealthConfig](defaultConfig), +) + +type Config struct { + EnableHealthChecking bool `mapstructure:"enable-health-checking"` + EnableEndpointHealthChecking bool `mapstructure:"enable-endpoint-health-checking"` +} + +var defaultConfig = Config{ + EnableHealthChecking: true, + EnableEndpointHealthChecking: true, +} + +type CiliumHealthConfig interface { + cell.Flagger + // IsHealthCheckingEnabled checks whether health server API and active health checks are enabled + IsHealthCheckingEnabled() bool + // IsEndpointHealthCheckingEnabled checks whether enables active checks to virtual health endpoints are enabled + IsEndpointHealthCheckingEnabled() bool + // IsActiveHealthCheckingEnabled checks whether periodic active health checks are enabled + IsActiveHealthCheckingEnabled() bool +} + +func (c Config) IsHealthCheckingEnabled() bool { + return c.EnableHealthChecking +} + +func (c Config) IsEndpointHealthCheckingEnabled() bool { + return c.EnableEndpointHealthChecking +} + +func (c Config) IsActiveHealthCheckingEnabled() bool { + return true +} + +func (c Config) Flags(flags *pflag.FlagSet) { + flags.Bool(EnableHealthCheckingName, c.EnableHealthChecking, "Enable connectivity health checking") + flags.Bool(EnableEndpointHealthCheckingName, c.EnableEndpointHealthChecking, "Enable connectivity health checking between virtual endpoints") +} diff --git a/pkg/healthconfig/healthconfig_test.go b/pkg/healthconfig/healthconfig_test.go new file mode 100644 index 0000000000000..e0984023f0a03 --- /dev/null +++ b/pkg/healthconfig/healthconfig_test.go @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package healthconfig + +import ( + "context" + "testing" + + "github.com/cilium/hive" + "github.com/cilium/hive/cell" + "github.com/cilium/hive/hivetest" + "github.com/spf13/pflag" + "github.com/stretchr/testify/require" +) + +func Test_healthConfig(t *testing.T) { + var hc CiliumHealthConfig + hive := hive.New( + Cell, + cell.Invoke(func(cfg CiliumHealthConfig) { hc = cfg }), + ) + + flags := pflag.NewFlagSet("", pflag.ContinueOnError) + hive.RegisterFlags(flags) + flags.Set(EnableHealthCheckingName, "false") + flags.Set(EnableEndpointHealthCheckingName, "false") + + tlog := hivetest.Logger(t) + require.NoError(t, hive.Start(tlog, context.Background())) + + require.False(t, hc.IsHealthCheckingEnabled()) + require.False(t, hc.IsEndpointHealthCheckingEnabled()) +} diff --git a/pkg/hubble/cell/cell.go b/pkg/hubble/cell/cell.go index 59f6ad5d89cc3..049b6ce871006 100644 --- a/pkg/hubble/cell/cell.go +++ b/pkg/hubble/cell/cell.go @@ -19,6 +19,7 @@ import ( exportercell "github.com/cilium/cilium/pkg/hubble/exporter/cell" "github.com/cilium/cilium/pkg/hubble/metrics" metricscell "github.com/cilium/cilium/pkg/hubble/metrics/cell" + "github.com/cilium/cilium/pkg/hubble/observer/namespace" "github.com/cilium/cilium/pkg/hubble/observer/observeroption" "github.com/cilium/cilium/pkg/hubble/parser" parsercell "github.com/cilium/cilium/pkg/hubble/parser/cell" @@ -44,14 +45,17 @@ var Cell = cell.Module( // Hubble flow log exporters exportercell.Cell, - // Parser for Hubble flows - parsercell.Cell, - // Metrics server and flow processor metricscell.Cell, // Drop event emitter flow processor dropeventemitter.Cell, + + // Parser for Hubble flows + parsercell.Cell, + + // Hubble flows k8s namespaces monitor + namespace.Cell, ) // The core cell group, which contains the Hubble integration and the 
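The healthconfig cell added above lets other modules receive the health options through dependency injection instead of reading option.Config globals, as health_manager.go now does. A hedged sketch of a consumer; the module name and constructor are hypothetical, only healthconfig.Cell and the CiliumHealthConfig interface come from this change:

package example

import (
	"log/slog"

	"github.com/cilium/hive/cell"

	"github.com/cilium/cilium/pkg/healthconfig"
)

// Cell is a hypothetical consumer module; it must be composed in the same
// hive as healthconfig.Cell so the config can be injected.
var Cell = cell.Module(
	"example-health-consumer",
	"Example consumer of the Cilium health config",
	cell.Invoke(registerHealthProbes),
)

func registerHealthProbes(logger *slog.Logger, cfg healthconfig.CiliumHealthConfig) {
	if !cfg.IsEndpointHealthCheckingEnabled() {
		logger.Info("endpoint health checking disabled; skipping virtual health endpoint")
		return
	}
	// ... launch the virtual health endpoint here ...
}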
@@ -85,7 +89,8 @@ type hubbleParams struct { DropEventEmitter dropeventemitter.FlowProcessor - PayloadParser parser.Decoder + PayloadParser parser.Decoder + NamespaceManager namespace.Manager GRPCMetrics *grpc_prometheus.ServerMetrics MetricsFlowProcessor metrics.FlowProcessor @@ -112,6 +117,7 @@ func newHubbleIntegration(params hubbleParams) (HubbleIntegration, error) { params.ExporterBuilders, params.DropEventEmitter, params.PayloadParser, + params.NamespaceManager, params.GRPCMetrics, params.MetricsFlowProcessor, params.Config, diff --git a/pkg/hubble/cell/config.go b/pkg/hubble/cell/config.go index c56fd27f380e1..617c60c55491f 100644 --- a/pkg/hubble/cell/config.go +++ b/pkg/hubble/cell/config.go @@ -14,6 +14,7 @@ import ( hubbleDefaults "github.com/cilium/cilium/pkg/hubble/defaults" "github.com/cilium/cilium/pkg/hubble/observer/observeroption" monitorAPI "github.com/cilium/cilium/pkg/monitor/api" + "github.com/cilium/cilium/pkg/time" ) type config struct { @@ -28,6 +29,9 @@ type config struct { // MonitorEvents specifies Cilium monitor events for Hubble to observe. By // default, Hubble observes all monitor events. MonitorEvents []string `mapstructure:"hubble-monitor-events"` + // LostEventSendInterval specifies the interval at which lost events are + // sent from the Observer server, if any. + LostEventSendInterval time.Duration `mapstructure:"hubble-lost-event-send-interval"` // SocketPath specifies the UNIX domain socket for Hubble server to listen // to. @@ -43,9 +47,10 @@ type config struct { var defaultConfig = config{ EnableHubble: false, // Hubble internals (parser, ringbuffer) configuration - EventBufferCapacity: observeroption.Default.MaxFlows.AsInt(), - EventQueueSize: 0, // see getDefaultMonitorQueueSize() - MonitorEvents: []string{}, + EventBufferCapacity: observeroption.Default.MaxFlows.AsInt(), + EventQueueSize: 0, // see getDefaultMonitorQueueSize() + MonitorEvents: []string{}, + LostEventSendInterval: hubbleDefaults.LostEventSendInterval, // Hubble local server configuration SocketPath: hubbleDefaults.SocketPath, // Hubble TCP server configuration @@ -64,6 +69,7 @@ func (def config) Flags(flags *pflag.FlagSet) { strings.Join(monitorAPI.AllMessageTypeNames(), " "), ), ) + flags.Duration("hubble-lost-event-send-interval", def.LostEventSendInterval, "Interval at which lost events are sent from the Observer server, if any.") // Hubble local server configuration flags.String("hubble-socket-path", def.SocketPath, "Set hubble's socket path to listen for connections") // Hubble TCP server configuration diff --git a/pkg/hubble/cell/hubbleintegration.go b/pkg/hubble/cell/hubbleintegration.go index bf10cb5e522ec..550ec8196cb8f 100644 --- a/pkg/hubble/cell/hubbleintegration.go +++ b/pkg/hubble/cell/hubbleintegration.go @@ -31,6 +31,7 @@ import ( "github.com/cilium/cilium/pkg/hubble/metrics" "github.com/cilium/cilium/pkg/hubble/monitor" "github.com/cilium/cilium/pkg/hubble/observer" + "github.com/cilium/cilium/pkg/hubble/observer/namespace" "github.com/cilium/cilium/pkg/hubble/observer/observeroption" "github.com/cilium/cilium/pkg/hubble/parser" "github.com/cilium/cilium/pkg/hubble/peer" @@ -75,6 +76,8 @@ type hubbleIntegration struct { // payloadParser is used to decode monitor events into Hubble events. payloadParser parser.Decoder + // nsManager is used to monitor the namespaces seen in Hubble flows. + nsManager namespace.Manager // GRPC metrics are registered on the Hubble gRPC server and are // exposed by the Hubble metrics server (from hubble-metrics cell). 
@@ -98,6 +101,7 @@ func new( exporterBuilders []*exportercell.FlowLogExporterBuilder, dropEventEmitter dropeventemitter.FlowProcessor, payloadParser parser.Decoder, + nsManager namespace.Manager, grpcMetrics *grpc_prometheus.ServerMetrics, metricsFlowProcessor metrics.FlowProcessor, config config, @@ -119,14 +123,15 @@ func new( endpointManager: endpointManager, ipcache: ipcache, cgroupManager: cgroupManager, - dropEventEmitter: dropEventEmitter, nodeManager: nodeManager, nodeLocalStore: nodeLocalStore, monitorAgent: monitorAgent, tlsConfigPromise: tlsConfigPromise, observerOptions: observerOptions, exporters: exporters, + dropEventEmitter: dropEventEmitter, payloadParser: payloadParser, + nsManager: nsManager, grpcMetrics: grpcMetrics, metricsFlowProcessor: metricsFlowProcessor, config: config, @@ -243,6 +248,7 @@ func (h *hubbleIntegration) launch(ctx context.Context) (*observer.LocalObserver observerOpts = append(observerOpts, observeroption.WithMaxFlows(maxFlows), observeroption.WithMonitorBuffer(h.config.EventQueueSize), + observeroption.WithLostEventSendInterval(h.config.LostEventSendInterval), ) // register exporters @@ -263,12 +269,9 @@ func (h *hubbleIntegration) launch(ctx context.Context) (*observer.LocalObserver // for explicit ordering of known dependencies observerOpts = append(observerOpts, h.observerOptions...) - namespaceManager := observer.NewNamespaceManager() - go namespaceManager.Run(ctx) - hubbleObserver, err := observer.NewLocalServer( h.payloadParser, - namespaceManager, + h.nsManager, h.log, observerOpts..., ) @@ -276,7 +279,7 @@ func (h *hubbleIntegration) launch(ctx context.Context) (*observer.LocalObserver return nil, fmt.Errorf("failed to initialize observer server: %w", err) } go hubbleObserver.Start() - h.monitorAgent.RegisterNewConsumer(monitor.NewConsumer(hubbleObserver)) + h.monitorAgent.RegisterNewConsumer(monitor.NewConsumer(hubbleObserver, h.config.LostEventSendInterval)) tlsEnabled := h.tlsConfigPromise != nil diff --git a/pkg/hubble/defaults/defaults.go b/pkg/hubble/defaults/defaults.go index fc6022001749a..853904d3573cd 100644 --- a/pkg/hubble/defaults/defaults.go +++ b/pkg/hubble/defaults/defaults.go @@ -3,7 +3,11 @@ package defaults -import ciliumDefaults "github.com/cilium/cilium/pkg/defaults" +import ( + "time" + + ciliumDefaults "github.com/cilium/cilium/pkg/defaults" +) const ( // ServerPort is the default port for hubble server when a provided @@ -30,4 +34,9 @@ const ( // SocketPath is the path to the UNIX domain socket exposing the Hubble API // to clients locally. SocketPath = ciliumDefaults.RuntimePath + "/hubble.sock" + + // LostEventSendInterval is the default interval at which lost events are sent + // from the Observer server, if any. The default of 1s matches Hubble + // Relay's SortBufferDrainTimeout. 
+ LostEventSendInterval = 1 * time.Second ) diff --git a/pkg/hubble/filters/filters.go b/pkg/hubble/filters/filters.go index 04f558b588e3f..b1f4312c7c30d 100644 --- a/pkg/hubble/filters/filters.go +++ b/pkg/hubble/filters/filters.go @@ -149,5 +149,6 @@ func DefaultFilters(log *slog.Logger) []OnBuildFilter { &TrafficDirectionFilter{}, &CELExpressionFilter{log: log}, &NetworkInterfaceFilter{}, + &IPTraceIDFilter{}, } } diff --git a/pkg/hubble/filters/ip_tracing.go b/pkg/hubble/filters/ip_tracing.go new file mode 100644 index 0000000000000..71258faf593ca --- /dev/null +++ b/pkg/hubble/filters/ip_tracing.go @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Hubble + +package filters + +import ( + "context" + "slices" + + flowpb "github.com/cilium/cilium/api/v1/flow" + v1 "github.com/cilium/cilium/pkg/hubble/api/v1" +) + +func filterByIPTraceID(tids []uint64) FilterFunc { + return func(ev *v1.Event) bool { + trace := ev.GetFlow().GetIpTraceId().GetTraceId() + return slices.Contains(tids, trace) + } +} + +// TraceIDFilter implements filtering based on IP trace IDs. +type IPTraceIDFilter struct{} + +// OnBuildFilter builds a IP trace ID filter. +func (t *IPTraceIDFilter) OnBuildFilter(_ context.Context, ff *flowpb.FlowFilter) ([]FilterFunc, error) { + var fs []FilterFunc + if ids := ff.GetIpTraceId(); len(ids) > 0 { + fs = append(fs, filterByIPTraceID(ids)) + } + return fs, nil +} diff --git a/pkg/hubble/filters/ip_tracing_test.go b/pkg/hubble/filters/ip_tracing_test.go new file mode 100644 index 0000000000000..2294fd15d48a6 --- /dev/null +++ b/pkg/hubble/filters/ip_tracing_test.go @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Hubble + +package filters + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + + flowpb "github.com/cilium/cilium/api/v1/flow" + v1 "github.com/cilium/cilium/pkg/hubble/api/v1" +) + +func TestIPTraceIDFilter(t *testing.T) { + ctx := context.Background() + tests := []struct { + name string + f []*flowpb.FlowFilter + ev *v1.Event + want bool + }{ + { + name: "match_single_filter", + f: []*flowpb.FlowFilter{ + {IpTraceId: []uint64{1}}, + }, + ev: &v1.Event{ + Event: &flowpb.Flow{ + IpTraceId: &flowpb.IPTraceID{ + TraceId: 1, + }, + }, + }, + want: true, + }, + { + name: "match_multiple_filters", + f: []*flowpb.FlowFilter{ + {IpTraceId: []uint64{1}}, + {IpTraceId: []uint64{2}}, + }, + ev: &v1.Event{ + Event: &flowpb.Flow{ + IpTraceId: &flowpb.IPTraceID{ + TraceId: 2, + }, + }, + }, + want: true, + }, + { + name: "no_filter", + ev: &v1.Event{ + Event: &flowpb.Flow{ + IpTraceId: &flowpb.IPTraceID{ + TraceId: 1, + }, + }, + }, + want: true, + }, + { + name: "mismatch", + f: []*flowpb.FlowFilter{ + {IpTraceId: []uint64{1}}, + }, + ev: &v1.Event{ + Event: &flowpb.Flow{ + IpTraceId: &flowpb.IPTraceID{ + TraceId: 2, + }, + }, + }, + want: false, + }, + { + name: "no_trace_id", + f: []*flowpb.FlowFilter{ + {IpTraceId: []uint64{1}}, + }, + ev: &v1.Event{ + Event: &flowpb.Flow{}, + }, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fl, err := BuildFilterList(ctx, tt.f, []OnBuildFilter{&IPTraceIDFilter{}}) + if err != nil { + t.Fatalf("Faile to build filter: %v", err) + } + assert.Equal(t, tt.want, fl.MatchOne(tt.ev)) + }) + } +} diff --git a/pkg/hubble/metrics/metrics_test.go b/pkg/hubble/metrics/metrics_test.go index 17fedb958962a..43b9a760b02fb 100644 --- a/pkg/hubble/metrics/metrics_test.go +++ b/pkg/hubble/metrics/metrics_test.go @@ -19,6 +19,7 @@ 
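The IPTraceIDFilter added above is applied server-side whenever a FlowFilter carries the IpTraceId field, so a client can narrow GetFlows to a single trace. A hedged sketch of such a request; only the IpTraceId field and the GetFlowsRequest shape are taken from these hunks, the surrounding client wiring is assumed:

package main

import (
	"fmt"

	flowpb "github.com/cilium/cilium/api/v1/flow"
	observerpb "github.com/cilium/cilium/api/v1/observer"
)

func main() {
	req := &observerpb.GetFlowsRequest{
		// Whitelist entries are OR-ed; this one matches flows whose
		// IpTraceId.TraceId equals 42.
		Whitelist: []*flowpb.FlowFilter{
			{IpTraceId: []uint64{42}},
		},
		Follow: true,
	}
	fmt.Println(req)
}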
import ( grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/expfmt" + "github.com/prometheus/common/model" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "k8s.io/client-go/util/workqueue" @@ -375,7 +376,7 @@ func TestHubbleServerWithDynamicMetrics(t *testing.T) { } func assertMetricsFromServer(t *testing.T, in io.Reader, exportedMetrics map[string][]string) { - var parser expfmt.TextParser + var parser = expfmt.NewTextParser(model.LegacyValidation) mfMap, err := parser.TextToMetricFamilies(in) if err != nil { log.Fatal(err) diff --git a/pkg/hubble/monitor/consumer.go b/pkg/hubble/monitor/consumer.go index 2d9b82a388687..7718e4faf9d38 100644 --- a/pkg/hubble/monitor/consumer.go +++ b/pkg/hubble/monitor/consumer.go @@ -11,6 +11,7 @@ import ( flowpb "github.com/cilium/cilium/api/v1/flow" "github.com/cilium/cilium/pkg/bufuuid" + "github.com/cilium/cilium/pkg/counter" "github.com/cilium/cilium/pkg/hubble/metrics" observerTypes "github.com/cilium/cilium/pkg/hubble/observer/types" "github.com/cilium/cilium/pkg/lock" @@ -27,27 +28,28 @@ type Observer interface { GetLogger() *slog.Logger } -// consumer implements monitorConsumer.MonitorConsumer +var _ monitorConsumer.MonitorConsumer = (*consumer)(nil) + +// consumer is a monitor consumer that sends events to an Observer. type consumer struct { - uuider *bufuuid.Generator - observer Observer - numEventsLost uint64 - lostLock lock.Mutex - logLimiter logging.Limiter + uuider *bufuuid.Generator + observer Observer - cachedLostNotification *observerTypes.MonitorEvent + lostLock lock.Mutex + lostEventCounter *counter.IntervalRangeCounter + logLimiter logging.Limiter metricLostPerfEvents prometheus.Counter metricLostObserverEvents prometheus.Counter } -// NewConsumer returns an initialized pointer to consumer. -func NewConsumer(observer Observer) monitorConsumer.MonitorConsumer { +// NewConsumer returns a new consumer that sends events to the provided Observer. +func NewConsumer(observer Observer, lostEventSendInterval time.Duration) *consumer { mc := &consumer{ - uuider: bufuuid.New(), - observer: observer, - numEventsLost: 0, - logLimiter: logging.NewLimiter(30*time.Second, 1), + uuider: bufuuid.New(), + observer: observer, + lostEventCounter: counter.NewIntervalRangeCounter(lostEventSendInterval), + logLimiter: logging.NewLimiter(30*time.Second, 1), metricLostPerfEvents: metrics.LostEvents.WithLabelValues( strings.ToLower(flowpb.LostEventSource_PERF_EVENT_RING_BUFFER.String())), @@ -57,73 +59,58 @@ func NewConsumer(observer Observer) monitorConsumer.MonitorConsumer { return mc } -// sendEventQueueLostEvents tries to send the current value of the lost events -// counter to the observer. If it succeeds to enqueue a notification, it -// resets the counter. Returns a boolean indicating whether the notification -// has been successfully sent. -func (c *consumer) sendNumLostEvents() bool { - c.lostLock.Lock() - defer c.lostLock.Unlock() - // check again, in case multiple - // routines contended the lock - if c.numEventsLost == 0 { - return true - } +// NotifyAgentEvent implements monitorConsumer.MonitorConsumer. 
+func (c *consumer) NotifyAgentEvent(typ int, message any) { + c.sendEvent(func() any { + return &observerTypes.AgentEvent{ + Type: typ, + Message: message, + } + }) +} - if c.cachedLostNotification == nil { - c.cachedLostNotification = c.newEvent(func() any { - return &observerTypes.LostEvent{ - Source: observerTypes.LostEventSourceEventsQueue, - NumLostEvents: c.numEventsLost, - } - }) - } else { - c.cachedLostNotification.Timestamp = time.Now() - c.cachedLostNotification.Payload.(*observerTypes.LostEvent).NumLostEvents = c.numEventsLost - } +// NotifyPerfEvent implements monitorConsumer.MonitorConsumer. +func (c *consumer) NotifyPerfEvent(data []byte, cpu int) { + c.sendEvent(func() any { + return &observerTypes.PerfEvent{ + Data: data, + CPU: cpu, + } + }) +} - select { - case c.observer.GetEventsChannel() <- c.cachedLostNotification: - // We now now safely reset the counter, as at this point have - // successfully notified the observer about the amount of events - // that were lost since the previous LostEvent message. Similarly, - // we reset the cached notification, so that a new one is created - // the next time. - c.numEventsLost = 0 - c.cachedLostNotification = nil - return true - default: - // We do not need to bump the numEventsLost counter here, as we will - // try to send a new LostEvent notification again during the next - // invocation of sendEvent - return false - } +// NotifyPerfEventLost implements monitorConsumer.MonitorConsumer. +func (c *consumer) NotifyPerfEventLost(numLostEvents uint64, cpu int) { + c.sendEvent(func() any { + return &observerTypes.LostEvent{ + Source: observerTypes.LostEventSourcePerfRingBuffer, + NumLostEvents: numLostEvents, + CPU: cpu, + } + }) + c.metricLostPerfEvents.Inc() } // sendEvent enqueues an event in the observer. If this is not possible, it -// keeps a counter of lost events, which it will regularly try to send to the -// observer as well +// keeps a counter of lost events, which it will try to send at most once per +// configured interval, and on every call to sendEvent until it succeeds. func (c *consumer) sendEvent(payloader func() any) { - if c.numEventsLost > 0 { - if !c.sendNumLostEvents() { - // We just failed sending the lost notification, hence it doesn't - // make sense to try and send the actual event, as we'll most - // likely fail as well. - c.countDroppedEvent() - return - } - } + c.lostLock.Lock() + defer c.lostLock.Unlock() + + now := time.Now() + c.trySendLostEventLocked(now) select { - case c.observer.GetEventsChannel() <- c.newEvent(payloader): + case c.observer.GetEventsChannel() <- c.newEvent(now, payloader): default: - c.countDroppedEvent() + c.incrementLostEventLocked(now) } } -func (c *consumer) newEvent(payloader func() any) *observerTypes.MonitorEvent { +func (c *consumer) newEvent(ts time.Time, payloader func() any) *observerTypes.MonitorEvent { ev := &observerTypes.MonitorEvent{ - Timestamp: time.Now(), + Timestamp: ts, NodeName: nodeTypes.GetAbsoluteNodeName(), Payload: payloader(), } @@ -132,50 +119,45 @@ func (c *consumer) newEvent(payloader func() any) *observerTypes.MonitorEvent { return ev } -// countDroppedEvent logs that the events channel is full -// and counts how many messages it has lost. -func (c *consumer) countDroppedEvent() { - c.lostLock.Lock() - defer c.lostLock.Unlock() - if c.numEventsLost == 0 && c.logLimiter.Allow() { - c.observer.GetLogger(). 
- Warn( - "hubble events queue is full: dropping messages; consider increasing the queue size (hubble-event-queue-size) or provisioning more CPU", - logfields.RelatedMetric, "hubble_lost_events_total", - ) +// trySendLostEventLocked tries to send a lost event as needed. If it succeeds, it clears the +// lost event counter, otherwise it does nothing so we keep the existing count. It assumes that +// the caller holds c.lostLock. +func (c *consumer) trySendLostEventLocked(ts time.Time) { + // check if we should send a lost event + shouldSend := c.lostEventCounter.IsElapsed(ts) + if !shouldSend { + return } - c.numEventsLost++ - c.metricLostObserverEvents.Inc() -} -// NotifyAgentEvent implements monitorConsumer.MonitorConsumer -func (c *consumer) NotifyAgentEvent(typ int, message any) { - c.sendEvent(func() any { - return &observerTypes.AgentEvent{ - Type: typ, - Message: message, + count := c.lostEventCounter.Peek() + lostEvent := c.newEvent(ts, func() any { + return &observerTypes.LostEvent{ + Source: observerTypes.LostEventSourceEventsQueue, + NumLostEvents: count.Count, + First: count.First, + Last: count.Last, } }) -} -// NotifyPerfEvent implements monitorConsumer.MonitorConsumer -func (c *consumer) NotifyPerfEvent(data []byte, cpu int) { - c.sendEvent(func() any { - return &observerTypes.PerfEvent{ - Data: data, - CPU: cpu, - } - }) + select { + case c.observer.GetEventsChannel() <- lostEvent: + // only clear the counter if we successfully sent the lost event + c.lostEventCounter.Clear() + default: + } } -// NotifyPerfEventLost implements monitorConsumer.MonitorConsumer -func (c *consumer) NotifyPerfEventLost(numLostEvents uint64, cpu int) { - c.sendEvent(func() any { - return &observerTypes.LostEvent{ - Source: observerTypes.LostEventSourcePerfRingBuffer, - NumLostEvents: numLostEvents, - CPU: cpu, - } - }) - c.metricLostPerfEvents.Inc() +// incrementLostEventLocked increments the lost event counter. It also logs a warning message if the +// counter was previously empty and the log limiter allows it. It assumes that the caller holds +// c.lostLock. +func (c *consumer) incrementLostEventLocked(ts time.Time) { + if c.lostEventCounter.Peek().Count == 0 && c.logLimiter.Allow() { + c.observer.GetLogger(). 
+ Warn( + "hubble events queue is full: dropping messages; consider increasing the queue size (hubble-event-queue-size) or provisioning more CPU", + logfields.RelatedMetric, "hubble_lost_events_total", + ) + } + c.lostEventCounter.Increment(ts) + c.metricLostObserverEvents.Inc() } diff --git a/pkg/hubble/monitor/consumer_test.go b/pkg/hubble/monitor/consumer_test.go index afa0dc4592785..233c53de75e96 100644 --- a/pkg/hubble/monitor/consumer_test.go +++ b/pkg/hubble/monitor/consumer_test.go @@ -11,7 +11,9 @@ import ( "github.com/cilium/hive/hivetest" "github.com/google/uuid" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/cilium/cilium/pkg/hubble/defaults" observerTypes "github.com/cilium/cilium/pkg/hubble/observer/types" "github.com/cilium/cilium/pkg/monitor/api" nodeTypes "github.com/cilium/cilium/pkg/node/types" @@ -37,7 +39,8 @@ func TestHubbleConsumer(t *testing.T) { events: make(chan *observerTypes.MonitorEvent, 1), logger: hivetest.Logger(t), } - consumer := NewConsumer(observer) + lostSendInterval := 100 * time.Millisecond + consumer := NewConsumer(observer, lostSendInterval) data := []byte{0, 1, 2, 3, 4} cpu := 5 @@ -84,10 +87,19 @@ func TestHubbleConsumer(t *testing.T) { assert.Equal(t, expected.Payload, received.Payload) assert.NotEqual(t, uuid.UUID{}, received.UUID) - // The first notification will get through, the other two will be dropped + // The first notification will get through, the others two will be dropped consumer.NotifyAgentEvent(1, nil) consumer.NotifyPerfEventLost(0, 0) // dropped consumer.NotifyPerfEvent(nil, 0) // dropped + + time.Sleep(lostSendInterval * 2) // Wait for lost event counter interval to elapse + + // try to send other events, which will also be dropped + // consumer should also try to send lost events but would not succeed + consumer.NotifyPerfEventLost(0, 0) // dropped + consumer.NotifyPerfEvent(nil, 0) // dropped + + // then receive the event before the drops happened expected = &observerTypes.MonitorEvent{ NodeName: nodeTypes.GetName(), Payload: &observerTypes.AgentEvent{ @@ -99,20 +111,27 @@ func TestHubbleConsumer(t *testing.T) { assert.Equal(t, expected.Payload, received.Payload) assert.NotEqual(t, uuid.UUID{}, received.UUID) - // Now that the channel has one slot again, send another message - // (which will be dropped) to get a lost event notifications - consumer.NotifyAgentEvent(0, nil) // dropped - - expected = &observerTypes.MonitorEvent{ - NodeName: nodeTypes.GetName(), - Payload: &observerTypes.LostEvent{ - Source: observerTypes.LostEventSourceEventsQueue, - NumLostEvents: 2, - }, + // now that we emptied the channel, the consumer should be able to send + // the lost events notification, which it tries to do if any are pending + // before the next event is sent. Since we only have a buffer of size 1, + // this event will be dropped. + consumer.NotifyPerfEvent(nil, 0) // dropped + + // receive the lost event notification which is always + // sent before the next event, and validate we receive + // the count of lost events before and after the counter + // interval elapsed. 
+ expectedPayload := &observerTypes.LostEvent{ + Source: observerTypes.LostEventSourceEventsQueue, + NumLostEvents: 4, + // omit First, Last timestamps on-purpose as they are not predictable } received = <-observer.GetEventsChannel() - assert.Equal(t, expected.NodeName, received.NodeName) - assert.Equal(t, expected.Payload, received.Payload) + assert.Equal(t, expected.NodeName, nodeTypes.GetName()) + receivedPayload, ok := received.Payload.(*observerTypes.LostEvent) + require.Truef(t, ok, "expected payload to be of type *observerTypes.LostEvent, got %T", received.Payload) + assert.Equal(t, expectedPayload.Source, receivedPayload.Source) + assert.Equal(t, expectedPayload.NumLostEvents, receivedPayload.NumLostEvents) assert.NotEqual(t, uuid.UUID{}, received.UUID) // Verify that the events channel is empty now. @@ -140,7 +159,7 @@ func BenchmarkHubbleConsumerSendEvent(b *testing.B) { } var ( - cnsm = NewConsumer(observer) + cnsm = NewConsumer(observer, defaults.LostEventSendInterval) data = []byte{0, 1, 2, 3, 4} cpu = 5 ) diff --git a/pkg/hubble/observer/local_observer.go b/pkg/hubble/observer/local_observer.go index 3b669acabb9f7..f973ba7f46864 100644 --- a/pkg/hubble/observer/local_observer.go +++ b/pkg/hubble/observer/local_observer.go @@ -18,10 +18,12 @@ import ( flowpb "github.com/cilium/cilium/api/v1/flow" observerpb "github.com/cilium/cilium/api/v1/observer" + "github.com/cilium/cilium/pkg/counter" v1 "github.com/cilium/cilium/pkg/hubble/api/v1" "github.com/cilium/cilium/pkg/hubble/build" "github.com/cilium/cilium/pkg/hubble/container" "github.com/cilium/cilium/pkg/hubble/filters" + "github.com/cilium/cilium/pkg/hubble/observer/namespace" "github.com/cilium/cilium/pkg/hubble/observer/observeroption" observerTypes "github.com/cilium/cilium/pkg/hubble/observer/types" "github.com/cilium/cilium/pkg/hubble/parser" @@ -64,13 +66,13 @@ type LocalObserverServer struct { // numObservedFlows counts how many flows have been observed numObservedFlows atomic.Uint64 - namespaceManager NamespaceManager + nsManager namespace.Manager } // NewLocalServer returns a new local observer server. func NewLocalServer( payloadParser parser.Decoder, - namespaceManager NamespaceManager, + nsManager namespace.Manager, logger *slog.Logger, options ...observeroption.Option, ) (*LocalObserverServer, error) { @@ -89,14 +91,14 @@ func NewLocalServer( ) s := &LocalObserverServer{ - log: logger, - ring: container.NewRing(opts.MaxFlows), - events: make(chan *observerTypes.MonitorEvent, opts.MonitorBuffer), - stopped: make(chan struct{}), - payloadParser: payloadParser, - startTime: time.Now(), - namespaceManager: namespaceManager, - opts: opts, + log: logger, + ring: container.NewRing(opts.MaxFlows), + events: make(chan *observerTypes.MonitorEvent, opts.MonitorBuffer), + stopped: make(chan struct{}), + payloadParser: payloadParser, + startTime: time.Now(), + nsManager: nsManager, + opts: opts, } for _, f := range s.opts.OnServerInit { @@ -251,7 +253,7 @@ func (s *LocalObserverServer) GetNodes(ctx context.Context, req *observerpb.GetN // GetNamespaces implements observerpb.ObserverClient.GetNamespaces. func (s *LocalObserverServer) GetNamespaces(ctx context.Context, req *observerpb.GetNamespacesRequest) (*observerpb.GetNamespacesResponse, error) { - return &observerpb.GetNamespacesResponse{Namespaces: s.namespaceManager.GetNamespaces()}, nil + return &observerpb.GetNamespacesResponse{Namespaces: s.nsManager.GetNamespaces()}, nil } // GetFlows implements the proto method for client requests. 
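Both the monitor consumer above and the GetFlows handler changed below gate lost-event notifications on pkg/counter's IntervalRangeCounter, emitting at most one aggregated notification (count plus first/last timestamps) per interval. A minimal, self-contained re-implementation of that idea for illustration only; it is not the actual pkg/counter API, whose details may differ:

package main

import (
	"fmt"
	"time"
)

type lostRange struct {
	count       uint64
	first, last time.Time
}

// intervalCounter accumulates drops and tells the caller when a full
// interval has passed since the first unreported drop.
type intervalCounter struct {
	interval time.Duration
	current  lostRange
}

func (c *intervalCounter) increment(ts time.Time) {
	if c.current.count == 0 {
		c.current.first = ts
	}
	c.current.count++
	c.current.last = ts
}

func (c *intervalCounter) elapsed(ts time.Time) bool {
	return c.current.count > 0 && ts.Sub(c.current.first) >= c.interval
}

func (c *intervalCounter) clear() lostRange {
	r := c.current
	c.current = lostRange{}
	return r
}

func main() {
	c := &intervalCounter{interval: time.Second}
	start := time.Now()
	c.increment(start)
	c.increment(start.Add(200 * time.Millisecond))

	fmt.Println("flush yet?", c.elapsed(start.Add(500*time.Millisecond))) // false: interval not elapsed
	if c.elapsed(start.Add(1100 * time.Millisecond)) {
		r := c.clear()
		fmt.Printf("emit one aggregated LostEvent: %d drops between %s and %s\n",
			r.count, r.first.Format(time.StampMilli), r.last.Format(time.StampMilli))
	}
}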
@@ -289,20 +291,6 @@ func (s *LocalObserverServer) GetFlows( start := time.Now() ring := s.GetRingBuffer() - i := uint64(0) - if log.Enabled(context.Background(), slog.LevelDebug) { - defer func() { - log.Debug( - "GetFlows finished", - logfields.NumberOfFlows, i, - logfields.BufferSize, ring.Cap(), - logfields.Whitelist, logFilters(req.Whitelist), - logfields.Blacklist, logFilters(req.Blacklist), - logfields.Took, time.Since(start), - ) - }() - } - ringReader, err := newRingReader(ring, req, whitelist, blacklist) if err != nil { if errors.Is(err, io.EOF) { @@ -316,6 +304,19 @@ func (s *LocalObserverServer) GetFlows( return err } + if log.Enabled(context.Background(), slog.LevelDebug) { + defer func() { + log.Debug( + "GetFlows finished", + logfields.NumberOfFlows, eventsReader.eventCount, + logfields.BufferSize, ring.Cap(), + logfields.Whitelist, logFilters(req.Whitelist), + logfields.Blacklist, logFilters(req.Blacklist), + logfields.Took, time.Since(start), + ) + }() + } + fm := req.GetFieldMask() mask, err := fieldmask.New(fm) if err != nil { @@ -328,8 +329,35 @@ func (s *LocalObserverServer) GetFlows( mask.Alloc(flow.ProtoReflect()) } + // Setup a counter to rate-limit sending lost events to at most + // once every s.opts.LostEventSendInterval. + lostEventCounter := counter.NewIntervalRangeCounter(s.opts.LostEventSendInterval) + nextEvent: - for ; ; i++ { + for { + now := time.Now() + + if lostEventCounter.IsElapsed(now) { + // IsElapsed always returns false if the counter is empty, therefore + // we can trust that count is non-zero. + count := lostEventCounter.Clear() + resp := &observerpb.GetFlowsResponse{ + Time: timestamppb.New(now), + NodeName: nodeTypes.GetAbsoluteNodeName(), + ResponseTypes: &observerpb.GetFlowsResponse_LostEvents{ + LostEvents: &flowpb.LostEvent{ + Source: flowpb.LostEventSource_HUBBLE_RING_BUFFER, + NumEventsLost: count.Count, + First: timestamppb.New(count.First), + Last: timestamppb.New(count.Last), + }, + }, + } + if err = server.Send(resp); err != nil { + return err + } + } + e, err := eventsReader.Next(ctx) if err != nil { if errors.Is(err, io.EOF) { @@ -370,12 +398,21 @@ nextEvent: // when a query asks for 20 events, then lost events should not be // accounted for as they are not events per se but an indication // that some event was lost). - resp = &observerpb.GetFlowsResponse{ - Time: e.Timestamp, - NodeName: nodeTypes.GetAbsoluteNodeName(), - ResponseTypes: &observerpb.GetFlowsResponse_LostEvents{ - LostEvents: ev, - }, + + // We only want to rate-limit lost events that originate from the + // Hubble ring buffer. Other lost events should be rate-limited closer to + // the emitting source, if needed. 
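With the rate limiting introduced in this hunk, ring-buffer drops reach GetFlows clients as one aggregated LostEvents response per interval, carrying a count and a first/last timestamp range. A hedged sketch of how a consumer might handle it; the generated getters are assumed to follow the usual protobuf conventions, and the sample response mirrors the shape built in the hunk:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"

	flowpb "github.com/cilium/cilium/api/v1/flow"
	observerpb "github.com/cilium/cilium/api/v1/observer"
)

func handleResponse(resp *observerpb.GetFlowsResponse) {
	if lost := resp.GetLostEvents(); lost != nil {
		if lost.GetSource() == flowpb.LostEventSource_HUBBLE_RING_BUFFER {
			// One message summarizes every drop in the interval.
			fmt.Printf("lost %d events between %s and %s\n",
				lost.GetNumEventsLost(),
				lost.GetFirst().AsTime(),
				lost.GetLast().AsTime())
		}
		return
	}
	if flow := resp.GetFlow(); flow != nil {
		fmt.Println("flow observed at", flow.GetTime().AsTime())
	}
}

func main() {
	now := time.Now()
	resp := &observerpb.GetFlowsResponse{
		ResponseTypes: &observerpb.GetFlowsResponse_LostEvents{
			LostEvents: &flowpb.LostEvent{
				Source:        flowpb.LostEventSource_HUBBLE_RING_BUFFER,
				NumEventsLost: 3,
				First:         timestamppb.New(now.Add(-time.Second)),
				Last:          timestamppb.New(now),
			},
		},
	}
	handleResponse(resp)
}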
+ switch ev.Source { + case flowpb.LostEventSource_HUBBLE_RING_BUFFER: + lostEventCounter.Increment(now) + default: + resp = &observerpb.GetFlowsResponse{ + Time: e.Timestamp, + NodeName: nodeTypes.GetAbsoluteNodeName(), + ResponseTypes: &observerpb.GetFlowsResponse_LostEvents{ + LostEvents: ev, + }, + } } } @@ -408,18 +445,6 @@ func (s *LocalObserverServer) GetAgentEvents( log := s.GetLogger() ring := s.GetRingBuffer() - i := uint64(0) - if log.Enabled(context.Background(), slog.LevelDebug) { - defer func() { - log.Debug( - "GetAgentEvents finished", - logfields.NumberOfAgentEvents, i, - logfields.BufferSize, ring.Cap(), - logfields.Took, time.Since(start), - ) - }() - } - ringReader, err := newRingReader(ring, req, whitelist, blacklist) if err != nil { if errors.Is(err, io.EOF) { @@ -433,7 +458,18 @@ func (s *LocalObserverServer) GetAgentEvents( return err } - for ; ; i++ { + if log.Enabled(context.Background(), slog.LevelDebug) { + defer func() { + log.Debug( + "GetAgentEvents finished", + logfields.NumberOfAgentEvents, eventsReader.eventCount, + logfields.BufferSize, ring.Cap(), + logfields.Took, time.Since(start), + ) + }() + } + + for { e, err := eventsReader.Next(ctx) if err != nil { if errors.Is(err, io.EOF) { @@ -476,18 +512,6 @@ func (s *LocalObserverServer) GetDebugEvents( log := s.GetLogger() ring := s.GetRingBuffer() - i := uint64(0) - if log.Enabled(context.Background(), slog.LevelDebug) { - defer func() { - log.Debug( - "GetDebugEvents finished", - logfields.NumberOfDebugEvents, i, - logfields.BufferSize, ring.Cap(), - logfields.Took, time.Since(start), - ) - }() - } - ringReader, err := newRingReader(ring, req, whitelist, blacklist) if err != nil { if errors.Is(err, io.EOF) { @@ -501,7 +525,18 @@ func (s *LocalObserverServer) GetDebugEvents( return err } - for ; ; i++ { + if log.Enabled(context.Background(), slog.LevelDebug) { + defer func() { + log.Debug( + "GetDebugEvents finished", + logfields.NumberOfDebugEvents, eventsReader.eventCount, + logfields.BufferSize, ring.Cap(), + logfields.Took, time.Since(start), + ) + }() + } + + for { e, err := eventsReader.Next(ctx) if err != nil { if errors.Is(err, io.EOF) { @@ -550,15 +585,23 @@ var ( _ genericRequest = (*observerpb.GetDebugEventsRequest)(nil) ) -// eventsReader reads flows using a RingReader. It applies the GetFlows request +// eventsReader reads events using a RingReader. It applies the request // criteria (blacklist, whitelist, follow, ...) before returning events. type eventsReader struct { - ringReader *container.RingReader + ringReader *container.RingReader + + // request criteria whitelist, blacklist filters.FilterFuncs maxEvents uint64 follow, timeRange bool - eventCount uint64 since, until *time.Time + + // eventCount is updated by the reader user to keep track of how many + // successful reads happened. This is because the reader does not know + // the underlying Event type the user is looking for. When maxEvents is + // non-zero, we use this counter to infer when the limit is reached from + // Next() and return io.EOF. + eventCount uint64 } // newEventsReader creates a new eventsReader that uses the given RingReader to @@ -661,13 +704,13 @@ func (r *eventsReader) Next(ctx context.Context) (*v1.Event, error) { func (s *LocalObserverServer) trackNamespaces(flow *flowpb.Flow) { // track namespaces seen. 
if srcNs := flow.GetSource().GetNamespace(); srcNs != "" { - s.namespaceManager.AddNamespace(&observerpb.Namespace{ + s.nsManager.AddNamespace(&observerpb.Namespace{ Namespace: srcNs, Cluster: nodeTypes.GetClusterName(), }) } if dstNs := flow.GetDestination().GetNamespace(); dstNs != "" { - s.namespaceManager.AddNamespace(&observerpb.Namespace{ + s.nsManager.AddNamespace(&observerpb.Namespace{ Namespace: dstNs, Cluster: nodeTypes.GetClusterName(), }) diff --git a/pkg/hubble/observer/local_observer_test.go b/pkg/hubble/observer/local_observer_test.go index edf76e8072c53..84543acb783c1 100644 --- a/pkg/hubble/observer/local_observer_test.go +++ b/pkg/hubble/observer/local_observer_test.go @@ -24,6 +24,7 @@ import ( observerpb "github.com/cilium/cilium/api/v1/observer" hubv1 "github.com/cilium/cilium/pkg/hubble/api/v1" "github.com/cilium/cilium/pkg/hubble/container" + "github.com/cilium/cilium/pkg/hubble/observer/namespace" "github.com/cilium/cilium/pkg/hubble/observer/observeroption" observerTypes "github.com/cilium/cilium/pkg/hubble/observer/types" "github.com/cilium/cilium/pkg/hubble/parser" @@ -34,13 +35,9 @@ import ( "github.com/cilium/cilium/pkg/node/types" ) -var ( - nsManager = NewNamespaceManager() -) - -func noopParser(t *testing.T) *parser.Parser { +func noopParser(tb testing.TB) *parser.Parser { pp, err := parser.New( - hivetest.Logger(t), + hivetest.Logger(tb), &testutils.NoopEndpointGetter, &testutils.NoopIdentityGetter, &testutils.NoopDNSGetter, @@ -49,13 +46,13 @@ func noopParser(t *testing.T) *parser.Parser { &testutils.NoopLinkGetter, &testutils.NoopPodMetadataGetter, ) - require.NoError(t, err) + require.NoError(tb, err) return pp } func TestNewLocalServer(t *testing.T) { - pp := noopParser(t) - s, err := NewLocalServer(pp, nsManager, hivetest.Logger(t)) + pp, nm := noopParser(t), testutils.NoopNamespaceManager + s, err := NewLocalServer(pp, nm, hivetest.Logger(t)) require.NoError(t, err) assert.NotNil(t, s.GetStopped()) assert.NotNil(t, s.GetPayloadParser()) @@ -65,8 +62,8 @@ func TestNewLocalServer(t *testing.T) { } func TestLocalObserverServer_ServerStatus(t *testing.T) { - pp := noopParser(t) - s, err := NewLocalServer(pp, nsManager, hivetest.Logger(t), observeroption.WithMaxFlows(container.Capacity1)) + pp, nm := noopParser(t), testutils.NoopNamespaceManager + s, err := NewLocalServer(pp, nm, hivetest.Logger(t), observeroption.WithMaxFlows(container.Capacity1)) require.NoError(t, err) res, err := s.ServerStatus(t.Context(), &observerpb.ServerStatusRequest{}) require.NoError(t, err) @@ -217,8 +214,8 @@ func TestLocalObserverServer_GetFlows(t *testing.T) { }, } - pp := noopParser(t) - s, err := NewLocalServer(pp, nsManager, hivetest.Logger(t), + pp, nm := noopParser(t), testutils.NoopNamespaceManager + s, err := NewLocalServer(pp, nm, hivetest.Logger(t), observeroption.WithMaxFlows(container.Capacity127), observeroption.WithMonitorBuffer(queueSize), ) @@ -356,8 +353,8 @@ func TestLocalObserverServer_GetAgentEvents(t *testing.T) { }, } - pp := noopParser(t) - s, err := NewLocalServer(pp, nsManager, hivetest.Logger(t), + pp, nm := noopParser(t), testutils.NoopNamespaceManager + s, err := NewLocalServer(pp, nm, hivetest.Logger(t), observeroption.WithMonitorBuffer(queueSize), ) require.NoError(t, err) @@ -405,8 +402,8 @@ func TestLocalObserverServer_GetFlows_Follow_Since(t *testing.T) { Follow: true, } - pp := noopParser(t) - s, err := NewLocalServer(pp, nsManager, hivetest.Logger(t), + pp, nm := noopParser(t), testutils.NoopNamespaceManager + s, err := NewLocalServer(pp, 
nm, hivetest.Logger(t), observeroption.WithMaxFlows(container.Capacity127), observeroption.WithMonitorBuffer(queueSize), ) @@ -503,8 +500,8 @@ func TestHooks(t *testing.T) { return false, nil } - pp := noopParser(t) - s, err := NewLocalServer(pp, nsManager, hivetest.Logger(t), + pp, nm := noopParser(t), testutils.NoopNamespaceManager + s, err := NewLocalServer(pp, nm, hivetest.Logger(t), observeroption.WithMaxFlows(container.Capacity15), observeroption.WithMonitorBuffer(queueSize), observeroption.WithOnMonitorEventFunc(onMonitorEventFirst), @@ -560,8 +557,8 @@ func TestLocalObserverServer_OnFlowDelivery(t *testing.T) { return false, nil } - pp := noopParser(t) - s, err := NewLocalServer(pp, nsManager, hivetest.Logger(t), + pp, nm := noopParser(t), testutils.NoopNamespaceManager + s, err := NewLocalServer(pp, nm, hivetest.Logger(t), observeroption.WithMaxFlows(container.Capacity127), observeroption.WithMonitorBuffer(queueSize), observeroption.WithOnFlowDeliveryFunc(onFlowDelivery), @@ -623,8 +620,8 @@ func TestLocalObserverServer_OnGetFlows(t *testing.T) { return true, nil } - pp := noopParser(t) - s, err := NewLocalServer(pp, nsManager, hivetest.Logger(t), + pp, nm := noopParser(t), testutils.NoopNamespaceManager + s, err := NewLocalServer(pp, nm, hivetest.Logger(t), observeroption.WithMaxFlows(container.Capacity127), observeroption.WithMonitorBuffer(queueSize), observeroption.WithOnFlowDeliveryFunc(onFlowDelivery), @@ -701,7 +698,8 @@ func TestLocalObserverServer_NodeLabels(t *testing.T) { } // local hubble observer setup. - s, err := NewLocalServer(noopParser(t), nsManager, hivetest.Logger(t), + pp, nm := noopParser(t), testutils.NoopNamespaceManager + s, err := NewLocalServer(pp, nm, hivetest.Logger(t), observeroption.WithOnDecodedFlow(localNodeWatcher), ) require.NoError(t, err) @@ -734,7 +732,7 @@ func TestLocalObserverServer_NodeLabels(t *testing.T) { func TestLocalObserverServer_GetNamespaces(t *testing.T) { pp := noopParser(t) - nsManager := NewNamespaceManager() + nsManager := namespace.NewManager() nsManager.AddNamespace(&observerpb.Namespace{ Namespace: "zzz", }) @@ -769,21 +767,8 @@ func TestLocalObserverServer_GetNamespaces(t *testing.T) { } func Benchmark_TrackNamespaces(b *testing.B) { - pp, err := parser.New( - hivetest.Logger(b), - &testutils.NoopEndpointGetter, - &testutils.NoopIdentityGetter, - &testutils.NoopDNSGetter, - &testutils.NoopIPGetter, - &testutils.NoopServiceGetter, - &testutils.NoopLinkGetter, - &testutils.NoopPodMetadataGetter, - ) - if err != nil { - b.Fatal(err) - } - - nsManager := NewNamespaceManager() + pp := noopParser(b) + nsManager := namespace.NewManager() s, err := NewLocalServer(pp, nsManager, hivetest.Logger(b), observeroption.WithMaxFlows(container.Capacity1)) if err != nil { b.Fatal(err) diff --git a/pkg/hubble/observer/namespace/cell.go b/pkg/hubble/observer/namespace/cell.go new file mode 100644 index 0000000000000..e87965eea08fd --- /dev/null +++ b/pkg/hubble/observer/namespace/cell.go @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Hubble + +package namespace + +import ( + "context" + + "github.com/cilium/hive/cell" + "github.com/cilium/hive/job" +) + +var Cell = cell.ProvidePrivate(func(jobGroup job.Group) Manager { + m := NewManager() + jobGroup.Add(job.Timer( + "hubble-namespace-cleanup", + func(_ context.Context) error { + m.cleanupNamespaces() + return nil + }, + cleanupInterval, + )) + return m +}) diff --git a/pkg/hubble/observer/namespace/defaults.go b/pkg/hubble/observer/namespace/defaults.go new 
file mode 100644 index 0000000000000..3a61e40cbf1dc --- /dev/null +++ b/pkg/hubble/observer/namespace/defaults.go @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Hubble + +package namespace + +import "time" + +const ( + // cleanupInterval is the interval at which the namespace list from the + // manager is garbage collected. + cleanupInterval = 5 * time.Minute + // namespaceTTL is the time after which a namespace is garbage collected. + namespaceTTL = 1 * time.Hour +) diff --git a/pkg/hubble/observer/namespace_manager.go b/pkg/hubble/observer/namespace/manager.go similarity index 71% rename from pkg/hubble/observer/namespace_manager.go rename to pkg/hubble/observer/namespace/manager.go index b1bb6b3e2eeb6..12e3aa7aac67a 100644 --- a/pkg/hubble/observer/namespace_manager.go +++ b/pkg/hubble/observer/namespace/manager.go @@ -1,10 +1,9 @@ // SPDX-License-Identifier: Apache-2.0 // Copyright Authors of Hubble -package observer +package namespace import ( - "context" "sort" observerpb "github.com/cilium/cilium/api/v1/observer" @@ -12,14 +11,7 @@ import ( "github.com/cilium/cilium/pkg/time" ) -var _ NamespaceManager = &namespaceManager{} - -const ( - checkNamespaceAgeFrequency = 5 * time.Minute - namespaceTTL = time.Hour -) - -type NamespaceManager interface { +type Manager interface { GetNamespaces() []*observerpb.Namespace AddNamespace(*observerpb.Namespace) } @@ -35,36 +27,24 @@ type namespaceManager struct { nowFunc func() time.Time } -func NewNamespaceManager() *namespaceManager { +// NOTE: there are still a couple of places where we need to construct a +// functional ns manager outside of Hive/Cell, i.e. testing and Hubble Relay. +func NewManager() *namespaceManager { return &namespaceManager{ namespaces: make(map[string]namespaceRecord), nowFunc: time.Now, } } -func (m *namespaceManager) Run(ctx context.Context) { - ticker := time.NewTicker(checkNamespaceAgeFrequency) - defer ticker.Stop() - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - // periodically remove any namespaces which haven't been seen in flows - // for the last hour - m.cleanupNamespaces() - } - } -} - +// cleanupNamespaces remove all namespaces not seen in flows for the last hour. 
func (m *namespaceManager) cleanupNamespaces() { m.mu.Lock() + defer m.mu.Unlock() for key, record := range m.namespaces { if record.added.Add(namespaceTTL).Before(m.nowFunc()) { delete(m.namespaces, key) } } - m.mu.Unlock() } func (m *namespaceManager) GetNamespaces() []*observerpb.Namespace { diff --git a/pkg/hubble/observer/namespace_manager_test.go b/pkg/hubble/observer/namespace/manager_test.go similarity index 69% rename from pkg/hubble/observer/namespace_manager_test.go rename to pkg/hubble/observer/namespace/manager_test.go index 044e3f3ed4c32..7b62a0f3684dd 100644 --- a/pkg/hubble/observer/namespace_manager_test.go +++ b/pkg/hubble/observer/namespace/manager_test.go @@ -1,7 +1,7 @@ // SPDX-License-Identifier: Apache-2.0 // Copyright Authors of Hubble -package observer +package namespace import ( "testing" @@ -15,43 +15,43 @@ import ( func TestNamespaceManager(t *testing.T) { // mock time currentTime := time.Time{} - mgr := NewNamespaceManager() + nsManager := NewManager() // use the mocked time - mgr.nowFunc = func() time.Time { + nsManager.nowFunc = func() time.Time { return currentTime } advanceTime := func(d time.Duration) { // update our currentTime currentTime = currentTime.Add(d) // trigger cleanupNamespaces after we advance time to ensure it's run - mgr.cleanupNamespaces() + nsManager.cleanupNamespaces() } // we start with no namespaces expected := []*observerpb.Namespace{} - assert.Equal(t, expected, mgr.GetNamespaces()) + assert.Equal(t, expected, nsManager.GetNamespaces()) // add a few namespaces // out of order, we'll verify it's sorted when we call GetNamespaces later - mgr.AddNamespace(&observerpb.Namespace{Namespace: "ns-2"}) - mgr.AddNamespace(&observerpb.Namespace{Namespace: "ns-1"}) + nsManager.AddNamespace(&observerpb.Namespace{Namespace: "ns-2"}) + nsManager.AddNamespace(&observerpb.Namespace{Namespace: "ns-1"}) // namespaces that we added should be returned, sorted expected = []*observerpb.Namespace{ {Namespace: "ns-1"}, {Namespace: "ns-2"}, } - assert.Equal(t, expected, mgr.GetNamespaces()) + assert.Equal(t, expected, nsManager.GetNamespaces()) // advance the clock by 1/2 the namespaceTTL and verify our namespaces are still known advanceTime(namespaceTTL / 2) - assert.Equal(t, expected, mgr.GetNamespaces()) + assert.Equal(t, expected, nsManager.GetNamespaces()) // add more namespaces now that the clock has been advanced - mgr.AddNamespace(&observerpb.Namespace{Namespace: "ns-1"}) - mgr.AddNamespace(&observerpb.Namespace{Namespace: "ns-3"}) - mgr.AddNamespace(&observerpb.Namespace{Namespace: "ns-4"}) + nsManager.AddNamespace(&observerpb.Namespace{Namespace: "ns-1"}) + nsManager.AddNamespace(&observerpb.Namespace{Namespace: "ns-3"}) + nsManager.AddNamespace(&observerpb.Namespace{Namespace: "ns-4"}) // we expect all namespaces to exist, the first 2 are 30 minutes old, and the // next two are 0 minutes old @@ -61,7 +61,7 @@ func TestNamespaceManager(t *testing.T) { {Namespace: "ns-3"}, {Namespace: "ns-4"}, } - assert.Equal(t, expected, mgr.GetNamespaces()) + assert.Equal(t, expected, nsManager.GetNamespaces()) // advance the clock another 1/2 TTL and add a minute to push us past the TTL advanceTime((namespaceTTL / 2) + time.Minute) @@ -73,11 +73,11 @@ func TestNamespaceManager(t *testing.T) { {Namespace: "ns-3"}, {Namespace: "ns-4"}, } - assert.Equal(t, expected, mgr.GetNamespaces()) + assert.Equal(t, expected, nsManager.GetNamespaces()) // advance the clock another 1/2 TTL and add a minute to push us past the TTL again advanceTime((namespaceTTL / 2) + 
time.Minute) // no namespaces left, nothing has been refreshed - assert.Equal(t, []*observerpb.Namespace{}, mgr.GetNamespaces()) + assert.Equal(t, []*observerpb.Namespace{}, nsManager.GetNamespaces()) } diff --git a/pkg/hubble/observer/observeroption/option.go b/pkg/hubble/observer/observeroption/option.go index 022f52839f959..064ef9a02fd3e 100644 --- a/pkg/hubble/observer/observeroption/option.go +++ b/pkg/hubble/observer/observeroption/option.go @@ -5,6 +5,7 @@ package observeroption import ( "context" + "errors" "log/slog" pb "github.com/cilium/cilium/api/v1/flow" @@ -13,6 +14,7 @@ import ( "github.com/cilium/cilium/pkg/hubble/container" "github.com/cilium/cilium/pkg/hubble/filters" observerTypes "github.com/cilium/cilium/pkg/hubble/observer/types" + "github.com/cilium/cilium/pkg/time" ) // Server gives access to the Hubble server @@ -23,8 +25,9 @@ type Server interface { // Options stores all the configurations values for the hubble server. type Options struct { - MaxFlows container.Capacity // max number of flows that can be stored in the ring buffer - MonitorBuffer int // buffer size for monitor payload + MaxFlows container.Capacity // max number of flows that can be stored in the ring buffer + MonitorBuffer int // buffer size for monitor payload + LostEventSendInterval time.Duration // interval at which lost events are sent from the Observer server, if any OnServerInit []OnServerInit // invoked when the hubble server is initialized OnMonitorEvent []OnMonitorEvent // invoked before an event is decoded @@ -138,6 +141,17 @@ func WithMaxFlows(capacity container.Capacity) Option { } } +// WithLostEventSendInterval sets the interval at which lost events are sent. +func WithLostEventSendInterval(interval time.Duration) Option { + return func(o *Options) error { + if interval <= 0 { + return errors.New("lost event send interval must be greater than 0") + } + o.LostEventSendInterval = interval + return nil + } +} + // WithOnServerInit adds a new callback to be invoked after server initialization func WithOnServerInit(f OnServerInit) Option { return func(o *Options) error { diff --git a/pkg/hubble/observer/types/types.go b/pkg/hubble/observer/types/types.go index a2e24cc16e7b9..086481f635568 100644 --- a/pkg/hubble/observer/types/types.go +++ b/pkg/hubble/observer/types/types.go @@ -61,4 +61,8 @@ type LostEvent struct { NumLostEvents uint64 // CPU is the cpu number if for events lost in the perf ring buffer CPU int + // First is the timestamp of the first event that was lost + First time.Time + // Last is the timestamp of the last event that was lost + Last time.Time } diff --git a/pkg/hubble/parser/parser.go b/pkg/hubble/parser/parser.go index a385e051987e8..611f850f78c15 100644 --- a/pkg/hubble/parser/parser.go +++ b/pkg/hubble/parser/parser.go @@ -171,13 +171,20 @@ func (p *Parser) Decode(monitorEvent *observerTypes.MonitorEvent) (*v1.Event, er return nil, errors.ErrUnknownEventType } case *observerTypes.LostEvent: - ev.Event = &pb.LostEvent{ + lostEvent := &pb.LostEvent{ Source: lostEventSourceToProto(payload.Source), NumEventsLost: payload.NumLostEvents, Cpu: &wrapperspb.Int32Value{ Value: int32(payload.CPU), }, } + if !payload.First.IsZero() { + lostEvent.First = timestamppb.New(payload.First) + } + if !payload.Last.IsZero() { + lostEvent.Last = timestamppb.New(payload.Last) + } + ev.Event = lostEvent return ev, nil case nil: return ev, errors.ErrEmptyData diff --git a/pkg/hubble/parser/threefour/parser.go b/pkg/hubble/parser/threefour/parser.go index 80ce10175eab8..75a359b9694ec 100644 
--- a/pkg/hubble/parser/threefour/parser.go +++ b/pkg/hubble/parser/threefour/parser.go @@ -23,6 +23,7 @@ import ( "github.com/cilium/cilium/pkg/lock" "github.com/cilium/cilium/pkg/monitor" monitorAPI "github.com/cilium/cilium/pkg/monitor/api" + "github.com/cilium/cilium/pkg/option" "github.com/cilium/cilium/pkg/policy/correlation" ) @@ -270,6 +271,7 @@ func (p *Parser) Decode(data []byte, decoded *pb.Flow) error { decoded.TrafficDirection = decodeTrafficDirection(srcEndpoint.ID, dn, tn, pvn) decoded.EventType = decodeCiliumEventType(eventType, eventSubType) decoded.TraceReason = decodeTraceReason(tn) + decoded.IpTraceId = decodeIpTraceId(dn, tn) decoded.SourceService = sourceService decoded.DestinationService = destinationService decoded.PolicyMatchType = decodePolicyMatchType(pvn) @@ -615,6 +617,23 @@ func decodeTraceReason(tn *monitor.TraceNotify) pb.TraceReason { } } +func decodeIpTraceId(dn *monitor.DropNotify, tn *monitor.TraceNotify) *pb.IPTraceID { + var id uint64 + switch { + case dn != nil: + id = uint64(dn.IPTraceID) + case tn != nil: + id = uint64(tn.IPTraceID) + } + if id == 0 { + return nil + } + return &pb.IPTraceID{ + TraceId: id, + IpOptionType: uint32(option.Config.IPTracingOptionType), + } +} + func decodeSecurityIdentities(dn *monitor.DropNotify, tn *monitor.TraceNotify, pvn *monitor.PolicyVerdictNotify) ( sourceSecurityIdentiy, destinationSecurityIdentity uint32, ) { diff --git a/pkg/hubble/parser/threefour/parser_test.go b/pkg/hubble/parser/threefour/parser_test.go index 0d56128a1e976..9b1d5681a4496 100644 --- a/pkg/hubble/parser/threefour/parser_test.go +++ b/pkg/hubble/parser/threefour/parser_test.go @@ -14,6 +14,7 @@ import ( "github.com/cilium/hive/hivetest" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "github.com/gopacket/gopacket" "github.com/gopacket/gopacket/layers" "github.com/stretchr/testify/assert" @@ -413,7 +414,8 @@ func TestL34Decode(t *testing.T) { // ff02::1:ff00:b3e5 f00d::a10:0:0:9195 L3/4 d2 := []byte{ 4, 5, 168, 11, 95, 22, 242, 184, 86, 0, 0, 0, 86, 0, 0, 0, 104, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 51, 51, 255, 0, 179, 229, 18, 145, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 51, 51, 255, 0, 179, 229, 18, 145, 6, 226, 34, 26, 134, 221, 96, 0, 0, 0, 0, 32, 58, 255, 255, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 255, 0, 179, 229, 240, 13, 0, 0, 0, 0, 0, 0, 10, 16, 0, 0, 0, 0, 145, 149, 135, 0, 80, 117, 0, 0, 0, 0, 240, 13, 0, 0, 0, @@ -498,16 +500,27 @@ func BenchmarkL34Decode(b *testing.B) { } func TestDecodeTraceNotify(t *testing.T) { - for _, c := range []struct { + testCases := []struct { Name string IsL3Device bool - }{{"L3Device", true}, {"L2Device", false}} { + }{ + { + Name: "L3Device", + IsL3Device: true, + }, + { + Name: "L2Device", + IsL3Device: false, + }, + } + + for _, c := range testCases { t.Run(c.Name, func(t *testing.T) { tn := monitor.TraceNotify{ Type: byte(monitorAPI.MessageTypeTrace), SrcLabel: 123, DstLabel: 456, - Version: monitor.TraceNotifyVersion1, + Version: monitor.TraceNotifyVersion2, } lay := []gopacket.SerializableLayer{ &layers.Ethernet{ @@ -570,78 +583,151 @@ func TestDecodeTraceNotify(t *testing.T) { } func TestDecodeDropNotify(t *testing.T) { - for _, c := range []struct { - Name string - IsL3Device bool - }{{"L3Device", true}, {"L2Device", false}} { - t.Run(c.Name, func(t *testing.T) { - dn := monitor.DropNotify{ - Type: byte(monitorAPI.MessageTypeDrop), - File: 1, // bpf_host.c - Line: 42, - SrcLabel: 123, - DstLabel: 456, - Version: monitor.DropNotifyVersion2, - } - lay := 
[]gopacket.SerializableLayer{ - &layers.Ethernet{ - SrcMAC: net.HardwareAddr{1, 2, 3, 4, 5, 6}, - DstMAC: net.HardwareAddr{1, 2, 3, 4, 5, 6}, - EthernetType: layers.EthernetTypeIPv4, - }, - &layers.IPv4{ - Version: 4, - IHL: 5, - Length: 49, - Id: 0xCECB, - TTL: 64, - Protocol: layers.IPProtocolUDP, - SrcIP: net.IPv4(1, 2, 3, 4), - DstIP: net.IPv4(1, 2, 3, 4), - }, - &layers.UDP{ - SrcPort: 23939, - DstPort: 32412, - }, + packetBuffer := gopacket.NewSerializeBuffer() + if err := gopacket.SerializeLayers(packetBuffer, + gopacket.SerializeOptions{}, + &layers.Ethernet{ + SrcMAC: net.HardwareAddr{1, 2, 3, 4, 5, 6}, + DstMAC: net.HardwareAddr{4, 5, 6, 7, 8, 9}, + EthernetType: layers.EthernetTypeIPv4, + }, + &layers.IPv4{ + IHL: 5, + SrcIP: net.IPv4(1, 2, 3, 4), + DstIP: net.IPv4(1, 2, 3, 4), + }, + ); err != nil { + t.Fatalf("SerializeLayers(...) buffer: %v", err) + } + + SrcLabel := identity.NumericIdentity(123) + DstLabel := identity.NumericIdentity(456) + + dropNotify := func(version uint16, iptraceid ...uint64) monitor.DropNotify { + var id uint64 + if len(iptraceid) > 0 { + id = iptraceid[0] + } + + return monitor.DropNotify{ + Type: byte(monitorAPI.MessageTypeDrop), + File: 1, // bpf_host.c + Version: version, + SrcLabel: SrcLabel, + DstLabel: DstLabel, + IPTraceID: id, + } + } + identityGetter := &testutils.FakeIdentityGetter{ + OnGetIdentity: func(securityIdentity uint32) (*identity.Identity, error) { + m := map[identity.NumericIdentity][]string{ + SrcLabel: {"k8s:src=label"}, + DstLabel: {"k8s:dst=label"}, } - if c.IsL3Device { - dn.Flags = monitor.TraceNotifyFlagIsL3Device - lay = lay[1:] + v, ok := m[identity.NumericIdentity(securityIdentity)] + if !ok { + return nil, fmt.Errorf("identity not found for %d", securityIdentity) } + return &identity.Identity{Labels: labels.NewLabelsFromModel(v)}, nil + }, + } - buf := &bytes.Buffer{} - err := binary.Write(buf, byteorder.Native, &dn) - require.NoError(t, err) - buffer := gopacket.NewSerializeBuffer() - err = gopacket.SerializeLayers(buffer, gopacket.SerializeOptions{}, lay...) 
- require.NoError(t, err) - buf.Write(buffer.Bytes()) - require.NoError(t, err) - identityGetter := &testutils.FakeIdentityGetter{ - OnGetIdentity: func(securityIdentity uint32) (*identity.Identity, error) { - if securityIdentity == uint32(dn.SrcLabel) { - return &identity.Identity{Labels: labels.NewLabelsFromModel([]string{"k8s:src=label"})}, nil - } else if securityIdentity == uint32(dn.DstLabel) { - return &identity.Identity{Labels: labels.NewLabelsFromModel([]string{"k8s:dst=label"})}, nil - } - return nil, fmt.Errorf("identity not found for %d", securityIdentity) + testCases := []struct { + name string + dn any + srcLabels []string + dstLabels []string + want *flowpb.Flow + }{ + { + name: "v3", + dn: dropNotify(3), + srcLabels: []string{"k8s:src=label"}, + dstLabels: []string{"k8s:dst=label"}, + want: &flowpb.Flow{ + Verdict: flowpb.Verdict_DROPPED, + Ethernet: &flowpb.Ethernet{ + Source: "01:02:03:04:05:06", + Destination: "04:05:06:07:08:09", + }, + IP: &flowpb.IP{ + Source: "1.2.3.4", + Destination: "1.2.3.4", + IpVersion: flowpb.IPVersion_IPv4, + }, + Source: &flowpb.Endpoint{ + Identity: 123, + Labels: []string{"k8s:src=label"}, + }, + Destination: &flowpb.Endpoint{ + Identity: 456, + Labels: []string{"k8s:dst=label"}, + }, + Type: flowpb.FlowType_L3_L4, + EventType: &flowpb.CiliumEventType{ + Type: 1, + }, + Summary: "IPv4", + File: &flowpb.FileInfo{Name: "bpf_host.c"}, + }, + }, + { + name: "v2 with IP Trace ID", + dn: dropNotify(3, 0x12345678), + srcLabels: []string{"k8s:src=label"}, + dstLabels: []string{"k8s:dst=label"}, + want: &flowpb.Flow{ + Verdict: flowpb.Verdict_DROPPED, + Ethernet: &flowpb.Ethernet{ + Source: "01:02:03:04:05:06", + Destination: "04:05:06:07:08:09", + }, + IP: &flowpb.IP{ + Source: "1.2.3.4", + Destination: "1.2.3.4", + IpVersion: flowpb.IPVersion_IPv4, + }, + Source: &flowpb.Endpoint{ + Identity: 123, + Labels: []string{"k8s:src=label"}, + }, + Destination: &flowpb.Endpoint{ + Identity: 456, + Labels: []string{"k8s:dst=label"}, }, + Type: flowpb.FlowType_L3_L4, + EventType: &flowpb.CiliumEventType{Type: 1}, + Summary: "IPv4", + File: &flowpb.FileInfo{Name: "bpf_host.c"}, + IpTraceId: &flowpb.IPTraceID{ + TraceId: 0x12345678, + }, + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + n := tc.dn + buf := &bytes.Buffer{} + if err := binary.Write(buf, byteorder.Native, n); err != nil { + t.Fatalf("Write(...) 
%T to buffer: %v", n, err) } + buf.Write(packetBuffer.Bytes()) parser, err := New(hivetest.Logger(t), &testutils.NoopEndpointGetter, identityGetter, &testutils.NoopDNSGetter, &testutils.NoopIPGetter, &testutils.NoopServiceGetter, &testutils.NoopLinkGetter) - require.NoError(t, err) + if err != nil { + t.Fatalf("New parser: %v", err) + } f := &flowpb.Flow{} - err = parser.Decode(buf.Bytes(), f) - require.NoError(t, err) - assert.Equal(t, []string{"k8s:src=label"}, f.GetSource().GetLabels()) - assert.Equal(t, []string{"k8s:dst=label"}, f.GetDestination().GetLabels()) - assert.NotNil(t, f.GetFile()) - assert.Equal(t, "bpf_host.c", f.GetFile().GetName()) - assert.Equal(t, uint32(42), f.GetFile().GetLine()) - assert.Equal(t, uint32(23939), f.GetL4().GetUDP().GetSourcePort()) - assert.Equal(t, uint32(32412), f.GetL4().GetUDP().GetDestinationPort()) + if err := parser.Decode(buf.Bytes(), f); err != nil { + t.Fatalf("parser.Decode(bytes, f): %v", err) + } + + if diff := cmp.Diff(tc.want, f, cmpopts.IgnoreFields(flowpb.Flow{}, "File"), protocmp.Transform()); diff != "" { + t.Errorf("Unexpected diff (-want +got):\n%s", diff) + } }) } } @@ -2188,6 +2274,46 @@ func TestDecode_TraceNotify(t *testing.T) { TraceObservationPoint: flowpb.TraceObservationPoint_TO_STACK, }, }, + { + name: "v2_from_lxc", + event: monitor.TraceNotify{ + Type: byte(monitorAPI.MessageTypeTrace), + Source: localEP, + ObsPoint: monitorAPI.TraceFromLxc, + Reason: monitor.TraceReasonUnknown, + Version: monitor.TraceNotifyVersion2, + }, + ipTuple: egressTuple, + want: &flowpb.Flow{ + EventType: &flowpb.CiliumEventType{ + SubType: 5, + }, + Source: &flowpb.Endpoint{ID: 1234}, + TraceObservationPoint: flowpb.TraceObservationPoint_FROM_ENDPOINT, + }, + }, + { + name: "v2_from_lxc_with_ip_trace_id", + event: monitor.TraceNotify{ + Type: byte(monitorAPI.MessageTypeTrace), + Source: localEP, + ObsPoint: monitorAPI.TraceFromLxc, + Reason: monitor.TraceReasonUnknown, + Version: monitor.TraceNotifyVersion2, + IPTraceID: 1234, + }, + ipTuple: egressTuple, + want: &flowpb.Flow{ + EventType: &flowpb.CiliumEventType{ + SubType: 5, + }, + Source: &flowpb.Endpoint{ID: 1234}, + TraceObservationPoint: flowpb.TraceObservationPoint_FROM_ENDPOINT, + IpTraceId: &flowpb.IPTraceID{ + TraceId: 1234, + }, + }, + }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { diff --git a/pkg/hubble/relay/observer/server.go b/pkg/hubble/relay/observer/server.go index d6343fb589b44..a1f089e731a8d 100644 --- a/pkg/hubble/relay/observer/server.go +++ b/pkg/hubble/relay/observer/server.go @@ -16,7 +16,7 @@ import ( observerpb "github.com/cilium/cilium/api/v1/observer" relaypb "github.com/cilium/cilium/api/v1/relay" "github.com/cilium/cilium/pkg/hubble/build" - "github.com/cilium/cilium/pkg/hubble/observer" + "github.com/cilium/cilium/pkg/hubble/observer/namespace" poolTypes "github.com/cilium/cilium/pkg/hubble/relay/pool/types" "github.com/cilium/cilium/pkg/lock" "github.com/cilium/cilium/pkg/logging/logfields" @@ -205,7 +205,7 @@ func (s *Server) GetNamespaces(ctx context.Context, req *observerpb.GetNamespace // results over failing on the first error g := new(errgroup.Group) - namespaceManager := observer.NewNamespaceManager() + nsManager := namespace.NewManager() for _, p := range s.peers.List() { if !isAvailable(p.Conn) { @@ -229,7 +229,7 @@ func (s *Server) GetNamespaces(ctx context.Context, req *observerpb.GetNamespace return nil } for _, ns := range nsResp.GetNamespaces() { - namespaceManager.AddNamespace(ns) + nsManager.AddNamespace(ns) } return 
nil }) @@ -239,7 +239,7 @@ func (s *Server) GetNamespaces(ctx context.Context, req *observerpb.GetNamespace return nil, err } - return &observerpb.GetNamespacesResponse{Namespaces: namespaceManager.GetNamespaces()}, nil + return &observerpb.GetNamespacesResponse{Namespaces: nsManager.GetNamespaces()}, nil } // ServerStatus implements observerpb.ObserverServer.ServerStatus by aggregating diff --git a/pkg/hubble/relay/server/server_test.go b/pkg/hubble/relay/server/server_test.go index e09c7d4e9290d..4261f9a3c5c1b 100644 --- a/pkg/hubble/relay/server/server_test.go +++ b/pkg/hubble/relay/server/server_test.go @@ -94,9 +94,8 @@ func getRandomEndpoint() *testutils.FakeEndpointInfo { func newHubbleObserver(t testing.TB, nodeName string, numFlows int) *observer.LocalObserverServer { queueSize := numFlows - pp := noopParser(t) - nsMgr := observer.NewNamespaceManager() - s, err := observer.NewLocalServer(pp, nsMgr, log, + pp, nm := noopParser(t), testutils.NoopNamespaceManager + s, err := observer.NewLocalServer(pp, nm, log, observeroption.WithMaxFlows(container.Capacity65535), observeroption.WithMonitorBuffer(queueSize), ) diff --git a/pkg/hubble/testutils/fake.go b/pkg/hubble/testutils/fake.go index f4a6dddb47f07..c4829724bb54c 100644 --- a/pkg/hubble/testutils/fake.go +++ b/pkg/hubble/testutils/fake.go @@ -487,3 +487,33 @@ var NoopPodMetadataGetter = FakePodMetadataGetter{ return nil }, } + +// FakeNamespaceManager is used for unit tests that need a namespace.Manager. +type FakeNamespaceManager struct { + OnGetNamespaces func() []*observerpb.Namespace + OnAddNamespace func(*observerpb.Namespace) +} + +// GetNamespaces implements namespace.Manager. +func (f *FakeNamespaceManager) GetNamespaces() []*observerpb.Namespace { + if f.OnGetNamespaces != nil { + return f.OnGetNamespaces() + } + panic("OnGetNamespaces not set") +} + +// AddNamespace implements namespace.Manager. +func (f *FakeNamespaceManager) AddNamespace(ns *observerpb.Namespace) { + if f.OnAddNamespace != nil { + f.OnAddNamespace(ns) + } + panic("OnAddNamespace not set") +} + +// NoopNamespaceManager always return an empty namespace list. 
+var NoopNamespaceManager = &FakeNamespaceManager{ + OnGetNamespaces: func() []*observerpb.Namespace { + return nil + }, + OnAddNamespace: func(_ *observerpb.Namespace) {}, +} diff --git a/pkg/hubble/testutils/payload_test.go b/pkg/hubble/testutils/payload_test.go index be39e24c609b9..964b869abdc20 100644 --- a/pkg/hubble/testutils/payload_test.go +++ b/pkg/hubble/testutils/payload_test.go @@ -28,15 +28,16 @@ func TestCreateL3L4Payload(t *testing.T) { // These contain TraceNotify headers plus the ethernet header of the packet // - IPv4: test with TraceNotifyVersion0 // - IPv6: test with TraceNotifyVersion1 (additional [16]bytes for empty OrigIP) - packetv4Prefix := decodeHex("0403a80b8d4598d462000000620000006800000001000000000002000000000006e9183bb275129106e2221a080045000054bfe900003f019ae2") - packetv4802Prefix := decodeHex("0403a80b8d4598d462000000620000006800000001000000000002000000000006e9183bb275129106e2221a81000202080045000054bfe900003f019ae2") - packetv6Prefix := decodeHex("0405a80b5f16f2b8560000005600010068000000000000000000000000000000000000000000000000000000000000003333ff00b3e5129106e2221a86dd6000000000203aff") - packetv6802Prefix := decodeHex("0405a80b5f16f2b8560000005600010068000000000000000000000000000000000000000000000000000000000000003333ff00b3e5129106e2221a8100020286dd6000000000203aff") + packetv4Prefix := "0403a80b8d4598d462000000620000006800000001000000000002000000000006e9183bb275129106e2221a080045000054bfe900003f019ae2" + packetv4PrefixV2 := "0403a80b8d4598d462000000620002006800000001000000000002000000000000000000000000000000000000000000f0debc9a7856341206e9183bb275129106e2221a080045000054bfe900003f019ae2" + packetv4802Prefix := "0403a80b8d4598d462000000620000006800000001000000000002000000000006e9183bb275129106e2221a81000202080045000054bfe900003f019ae2" + packetv6Prefix := "0405a80b5f16f2b8560000005600010068000000000000000000000000000000000000000000000000000000000000003333ff00b3e5129106e2221a86dd6000000000203aff" + packetv6802Prefix := "0405a80b5f16f2b8560000005600010068000000000000000000000000000000000000000000000000000000000000003333ff00b3e5129106e2221a8100020286dd6000000000203aff" // ICMPv4/v6 packets (with reversed src/dst IPs) - packetICMPv4 := decodeHex("010101010a107e4000003639225700051b7b415d0000000086bf050000000000101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f3031323334353637") - packetICMPv6Req := decodeHex("f00d0000000000000a10000000009195ff0200000000000000000001ff00b3e58700507500000000f00d0000000000000a1000000000b3e50101129106e2221a") - packetICMPv4Rev := decodeHex("0a107e400101010100003639225700051b7b415d0000000086bf050000000000101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f3031323334353637") - packetICMPv6Rev := decodeHex("ff0200000000000000000001ff00b3e5f00d0000000000000a100000000091958700507500000000f00d0000000000000a1000000000b3e50101129106e2221a") + packetICMPv4 := "010101010a107e4000003639225700051b7b415d0000000086bf050000000000101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f3031323334353637" + packetICMPv6Req := "f00d0000000000000a10000000009195ff0200000000000000000001ff00b3e58700507500000000f00d0000000000000a1000000000b3e50101129106e2221a" + packetICMPv4Rev := "0a107e400101010100003639225700051b7b415d0000000086bf050000000000101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f3031323334353637" + packetICMPv6Rev := "ff0200000000000000000001ff00b3e5f00d0000000000000a100000000091958700507500000000f00d0000000000000a1000000000b3e50101129106e2221a" // The following structs are decoded pieces of 
the above packets traceNotifyIPv4 := monitor.TraceNotify{ @@ -63,13 +64,25 @@ func TestCreateL3L4Payload(t *testing.T) { Reason: monitor.TraceReasonPolicy, Version: monitor.TraceNotifyVersion1, } + traceNotifyIPv4V2 := monitor.TraceNotify{ + Type: monitorAPI.MessageTypeTrace, + ObsPoint: monitorAPI.TraceToStack, + Source: 0xba8, + Hash: 0xd498458d, + OrigLen: 0x62, + CapLen: 0x62, + SrcLabel: 0x68, + DstLabel: 0x1, + Reason: monitor.TraceReasonCtReply, + Version: monitor.TraceNotifyVersion2, + IPTraceID: 0x123456789abcdef0, + } etherIPv4 := &layers.Ethernet{ EthernetType: layers.EthernetTypeIPv4, SrcMAC: net.HardwareAddr{0x12, 0x91, 0x06, 0xe2, 0x22, 0x1a}, DstMAC: net.HardwareAddr{0x06, 0xe9, 0x18, 0x3b, 0xb2, 0x75}, } - etherIPv6 := &layers.Ethernet{ EthernetType: layers.EthernetTypeIPv6, SrcMAC: net.HardwareAddr{0x12, 0x91, 0x6, 0xe2, 0x22, 0x1a}, @@ -166,7 +179,7 @@ func TestCreateL3L4Payload(t *testing.T) { msg: traceNotifyIPv4, l: []gopacket.SerializableLayer{etherIPv4, ipv4, icmpv4, icmpv4Payload}, }, - want: append(packetv4Prefix[:], packetICMPv4...), + want: decodeHex(packetv4Prefix + packetICMPv4), }, { name: "ICMPv6 Neighbor Solicitation", @@ -174,7 +187,15 @@ func TestCreateL3L4Payload(t *testing.T) { msg: traceNotifyIPv6, l: []gopacket.SerializableLayer{etherIPv6, ipv6, icmpv6, icmpv6Payload}, }, - want: append(packetv6Prefix[:], packetICMPv6Req...), + want: decodeHex(packetv6Prefix + packetICMPv6Req), + }, + { + name: "ICMPv4 Echo Reply with IP Trace", + args: args{ + msg: traceNotifyIPv4V2, + l: []gopacket.SerializableLayer{etherIPv4, ipv4, icmpv4, icmpv4Payload}, + }, + want: decodeHex(packetv4PrefixV2 + packetICMPv4), }, { name: "ICMPv4 Echo Reply Reversed", @@ -182,7 +203,7 @@ func TestCreateL3L4Payload(t *testing.T) { msg: traceNotifyIPv4, l: []gopacket.SerializableLayer{etherIPv4, ipv4Rev, icmpv4, icmpv4Payload}, }, - want: append(packetv4Prefix[:], packetICMPv4Rev...), + want: decodeHex(packetv4Prefix + packetICMPv4Rev), }, { name: "ICMPv6 Neighbor Solicitation Reversed", @@ -190,7 +211,7 @@ func TestCreateL3L4Payload(t *testing.T) { msg: traceNotifyIPv6, l: []gopacket.SerializableLayer{etherIPv6, ipv6Rev, icmpv6, icmpv6Payload}, }, - want: append(packetv6Prefix[:], packetICMPv6Rev...), + want: decodeHex(packetv6Prefix + packetICMPv6Rev), }, { name: "802.11q ICMPv4 Echo Reply", @@ -198,7 +219,7 @@ func TestCreateL3L4Payload(t *testing.T) { msg: traceNotifyIPv4, l: []gopacket.SerializableLayer{etherIPv4Dot1Q, dot1QIPv4, ipv4, icmpv4, icmpv4Payload}, }, - want: append(packetv4802Prefix[:], packetICMPv4...), + want: decodeHex(packetv4802Prefix + packetICMPv4), }, { name: "802.11q ICMPv6 Neighbor Solicitation", @@ -206,7 +227,7 @@ func TestCreateL3L4Payload(t *testing.T) { msg: traceNotifyIPv6, l: []gopacket.SerializableLayer{etherIPv6Dot1Q, dot1QIPv6, ipv6, icmpv6, icmpv6Payload}, }, - want: append(packetv6802Prefix[:], packetICMPv6Req...), + want: decodeHex(packetv6802Prefix + packetICMPv6Req), }, } for _, tt := range tests { diff --git a/pkg/identity/identity.go b/pkg/identity/identity.go index 506773edde431..a26af49e6bc8f 100644 --- a/pkg/identity/identity.go +++ b/pkg/identity/identity.go @@ -182,12 +182,6 @@ func (pair *IPIdentityPair) PrefixString() string { return ipstr + "/" + strconv.Itoa(ones) } -// RequiresGlobalIdentity returns true if the label combination requires a -// global identity -func RequiresGlobalIdentity(lbls labels.Labels) bool { - return ScopeForLabels(lbls) == IdentityScopeGlobal -} - // ScopeForLabels returns the identity scope to be used for 
the label set. // If all labels are either CIDR or reserved, then returns the CIDR scope. // Note: This assumes the caller has already called LookupReservedIdentityByLabels; diff --git a/pkg/identity/identity_test.go b/pkg/identity/identity_test.go index ecee7903a91e4..39e5ce7cba9b3 100644 --- a/pkg/identity/identity_test.go +++ b/pkg/identity/identity_test.go @@ -61,16 +61,6 @@ func TestIsReservedIdentity(t *testing.T) { require.False(t, NumericIdentity(123456).IsReservedIdentity()) } -func TestRequiresGlobalIdentity(t *testing.T) { - prefix := netip.MustParsePrefix("0.0.0.0/0") - require.False(t, RequiresGlobalIdentity(labels.GetCIDRLabels(prefix))) - - prefix = netip.MustParsePrefix("192.168.23.0/24") - require.False(t, RequiresGlobalIdentity(labels.GetCIDRLabels(prefix))) - - require.True(t, RequiresGlobalIdentity(labels.NewLabelsFromModel([]string{"k8s:foo=bar"}))) -} - func TestScopeForLabels(t *testing.T) { tests := []struct { lbls labels.Labels diff --git a/pkg/identity/numericidentity.go b/pkg/identity/numericidentity.go index 67c3778ed5808..f7608ff7f9817 100644 --- a/pkg/identity/numericidentity.go +++ b/pkg/identity/numericidentity.go @@ -426,23 +426,6 @@ func AddUserDefinedNumericIdentity(identity NumericIdentity, label string) error return nil } -// DelReservedNumericIdentity deletes the given Numeric Identity from the list -// of reservedIdentities. If the numeric identity is not between -// UserReservedNumericIdentity and MinimalNumericIdentity it will return -// ErrNotUserIdentity. -// Is not safe for concurrent use. -func DelReservedNumericIdentity(identity NumericIdentity) error { - if !IsUserReservedIdentity(identity) { - return ErrNotUserIdentity - } - label, ok := reservedIdentityNames[identity] - if ok { - delete(reservedIdentities, label) - delete(reservedIdentityNames, identity) - } - return nil -} - // NumericIdentity is the numeric representation of a security identity. // // Bits: diff --git a/pkg/ipam/allocator/clusterpool/clusterpool.go b/pkg/ipam/allocator/clusterpool/clusterpool.go index fb8e4ee46a106..f334da7c31d34 100644 --- a/pkg/ipam/allocator/clusterpool/clusterpool.go +++ b/pkg/ipam/allocator/clusterpool/clusterpool.go @@ -66,7 +66,7 @@ func (a *AllocatorOperator) Init(ctx context.Context, logger *slog.Logger, reg * } // Start kicks of Operator allocation. -func (a *AllocatorOperator) Start(ctx context.Context, updater ipam.CiliumNodeGetterUpdater, _ *metrics.Registry) (allocator.NodeEventHandler, error) { +func (a *AllocatorOperator) Start(ctx context.Context, updater ipam.CiliumNodeGetterUpdater, reg *metrics.Registry) (allocator.NodeEventHandler, error) { a.logger.Info( "Starting ClusterPool IP allocator", logfields.IPv4CIDRs, operatorOption.Config.ClusterPoolIPv4CIDR, @@ -78,7 +78,9 @@ func (a *AllocatorOperator) Start(ctx context.Context, updater ipam.CiliumNodeGe ) if operatorOption.Config.EnableMetrics { - iMetrics = ipamMetrics.NewTriggerMetrics(metrics.Namespace, "k8s_sync") + triggerMetrics := ipamMetrics.NewTriggerMetrics(metrics.Namespace, "k8s_sync") + triggerMetrics.Register(reg) + iMetrics = triggerMetrics } else { iMetrics = &ipamMetrics.NoOpMetricsObserver{} } diff --git a/pkg/ipam/crd.go b/pkg/ipam/crd.go index 6777876016310..3102f6d413977 100644 --- a/pkg/ipam/crd.go +++ b/pkg/ipam/crd.go @@ -532,8 +532,17 @@ func (n *nodeStore) updateLocalNodeResource(node *ciliumv2.CiliumNode) { // its resourceVersion) without updating the available IP pool. 
func (n *nodeStore) setOwnNodeWithoutPoolUpdate(node *ciliumv2.CiliumNode) { n.mutex.Lock() + defer n.mutex.Unlock() + + // Do not update to an inconsistent state (see updateLocalNodeResource) + if n.conf.IPAMMode() == ipamOption.IPAMENI { + if err := validateENIConfig(node); err != nil { + n.logger.Info("ENI state is not consistent yet", logfields.Error, err) + return + } + } + n.ownNode = node - n.mutex.Unlock() } // refreshNodeTrigger is called to refresh the custom resource after taking the diff --git a/pkg/ipam/multipool.go b/pkg/ipam/multipool.go index eb3bc3754e5f4..4cfad2da8265c 100644 --- a/pkg/ipam/multipool.go +++ b/pkg/ipam/multipool.go @@ -263,7 +263,7 @@ func newMultiPoolManager(logger *slog.Logger, conf *option.DaemonConfig, node ag k8sController := controller.NewManager() k8sUpdater, err := trigger.NewTrigger(trigger.Parameters{ - MinInterval: 15 * time.Second, + MinInterval: conf.IPAMCiliumNodeUpdateRate, TriggerFunc: func(reasons []string) { k8sController.TriggerController(multiPoolControllerName) }, diff --git a/pkg/k8s/apis/cilium.io/client/crds/v2alpha1/ciliumvteppolicies.yaml b/pkg/k8s/apis/cilium.io/client/crds/v2alpha1/ciliumvteppolicies.yaml new file mode 100644 index 0000000000000..da2c1714c7ba1 --- /dev/null +++ b/pkg/k8s/apis/cilium.io/client/crds/v2alpha1/ciliumvteppolicies.yaml @@ -0,0 +1,266 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: ciliumvteppolicies.cilium.io +spec: + group: cilium.io + names: + categories: + - cilium + - ciliumpolicy + kind: CiliumVtepPolicy + listKind: CiliumVtepPolicyList + plural: ciliumvteppolicies + shortNames: + - vtep-policy + singular: ciliumvteppolicy + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v2alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + properties: + destinationCIDRs: + description: |- + DestinationCIDRs is a list of destination CIDRs for destination IP addresses. + If a destination IP matches any one CIDR, it will be selected. + items: + format: cidr + type: string + maxItems: 30 + type: array + externalVTEP: + description: ExternalVTEP is the remote VTEP outside Cilium network. + properties: + ip: + description: |- + IP is the VTEP IP (remote node terminating VXLAN tunnel) + + Example: + When set to "192.168.1.100", matching traffic will be + redirected to the VXLAN tunnel towards IP address 192.168.1.100. + format: ipv4 + type: string + mac: + description: |- + MAC is a remote MAC address on the other side of VXLAN tunnel. This is + needed to build l2 and avoid ARP. 
+ + Example: + 00:11:22:33:44:55 that belongs to VXLAN tunnel interface on the remote side + pattern: ^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$ + type: string + type: object + selectors: + description: |- + CiliumVtepPolicyRules represents a list of rules by which traffic is + selected from/to the pods. + items: + properties: + namespaceSelector: + description: |- + Selects Namespaces using cluster-scoped labels. This field follows standard label + selector semantics; if present but empty, it selects all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from + the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + nodeSelector: + description: |- + This is a label selector which selects Pods by Node. This field follows standard label + selector semantics; if present but empty, it selects all nodes. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from + the MatchLabels {key,value} pair. 
+ maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + podSelector: + description: |- + This is a label selector which selects Pods. This field follows standard label + selector semantics; if present but empty, it selects all pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from + the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: object + maxItems: 30 + type: array + type: object + required: + - metadata + type: object + served: true + storage: true + subresources: {} diff --git a/pkg/k8s/apis/cilium.io/client/register.go b/pkg/k8s/apis/cilium.io/client/register.go index 216ed4b5b4c7d..939bcf637de03 100644 --- a/pkg/k8s/apis/cilium.io/client/register.go +++ b/pkg/k8s/apis/cilium.io/client/register.go @@ -93,6 +93,9 @@ const ( CPIPCRDName = k8sconstv2alpha1.CPIPKindDefinition + "/" + k8sconstv2alpha1.CustomResourceDefinitionVersion // CGCCCRDName is the full name of the CiliumGatewayClassConfig CRD. CGCCCRDName = k8sconstv2alpha1.CGCCKindDefinition + "/" + k8sconstv2alpha1.CustomResourceDefinitionVersion + + // CVPCRDName is the full name of the CiliumVtepPolicy CRD. 
+ CVPCRDName = k8sconstv2alpha1.CVPKindDefinition + "/" + k8sconstv2alpha1.CustomResourceDefinitionVersion ) type CRDList struct { @@ -191,6 +194,10 @@ func CustomResourceDefinitionList() map[string]*CRDList { Name: CGCCCRDName, FullName: k8sconstv2alpha1.CGCCName, }, + synced.CRDResourceName(k8sconstv2alpha1.CVPName): { + Name: CVPCRDName, + FullName: k8sconstv2alpha1.CVPName, + }, } } @@ -278,6 +285,9 @@ var ( //go:embed crds/v2alpha1/ciliumgatewayclassconfigs.yaml crdsv2Alpha1CiliumGatewayClassConfigs []byte + + //go:embed crds/v2alpha1/ciliumvteppolicies.yaml + crdsv2Alpha1CiliumVtepPolicies []byte ) // GetPregeneratedCRD returns the pregenerated CRD based on the requested CRD @@ -338,6 +348,8 @@ func GetPregeneratedCRD(logger *slog.Logger, crdName string) apiextensionsv1.Cus crdBytes = crdsv2Alpha1CiliumPodIPPools case CGCCCRDName: crdBytes = crdsv2Alpha1CiliumGatewayClassConfigs + case CVPCRDName: + crdBytes = crdsv2Alpha1CiliumVtepPolicies default: logging.Fatal(logger, "Pregenerated CRD does not exist", logAttr) } diff --git a/pkg/k8s/apis/cilium.io/v2alpha1/register.go b/pkg/k8s/apis/cilium.io/v2alpha1/register.go index 3459f27ce21d9..cfb73006c51b5 100644 --- a/pkg/k8s/apis/cilium.io/v2alpha1/register.go +++ b/pkg/k8s/apis/cilium.io/v2alpha1/register.go @@ -111,6 +111,11 @@ const ( CGCCListName = "ciliumgatewayclassconfiglists" CGCCKindDefinition = "CiliumGatewayClassConfig" CGCCName = CGCCPluralName + "." + CustomResourceDefinitionGroup + + // CiliumVtepPolicy + CVPPluralName = "ciliumvteppolicies" + CVPKindDefinition = "CiliumVtepPolicy" + CVPName = CVPPluralName + "." + CustomResourceDefinitionGroup ) // SchemeGroupVersion is group version used to register these objects @@ -182,6 +187,10 @@ func addKnownTypes(scheme *runtime.Scheme) error { // new Gateway API types &CiliumGatewayClassConfig{}, &CiliumGatewayClassConfigList{}, + + // VTEP Policy API types + &CiliumVtepPolicy{}, + &CiliumVtepPolicyList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/pkg/k8s/apis/cilium.io/v2alpha1/vtep_policy_types.go b/pkg/k8s/apis/cilium.io/v2alpha1/vtep_policy_types.go new file mode 100644 index 0000000000000..b99798798ac39 --- /dev/null +++ b/pkg/k8s/apis/cilium.io/v2alpha1/vtep_policy_types.go @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package v2alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + slimv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:categories={cilium,ciliumpolicy},singular="ciliumvteppolicy",path="ciliumvteppolicies",scope="Cluster",shortName={vtep-policy} +// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type=date +// +kubebuilder:storageversion + +type CiliumVtepPolicy struct { + // +k8s:openapi-gen=false + // +deepequal-gen=false + metav1.TypeMeta `json:",inline"` + // +k8s:openapi-gen=false + // +deepequal-gen=false + metav1.ObjectMeta `json:"metadata"` + + Spec CiliumVtepPolicySpec `json:"spec,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=false +// +deepequal-gen=false + +// CiliumVtepPolicyList is a list of CiliumVtepPolicy objects. +type CiliumVtepPolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items is a list of CiliumVtepPolicy. 
+ Items []CiliumVtepPolicy `json:"items"` +} + +// +kubebuilder:validation:Format=cidr +type CIDR string + +// +kubebuilder:validation:Pattern=`^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$` +// regex source: https://uibakery.io/regex-library/mac-address +type MAC string + +type CiliumVtepPolicySpec struct { + // +kubebuilder:validation:MaxItems=30 + // CiliumVtepPolicyRules represents a list of rules by which traffic is + // selected from/to the pods. + Selectors []CiliumVtepPolicyRules `json:"selectors,omitempty"` + + // +kubebuilder:validation:MaxItems=30 + // DestinationCIDRs is a list of destination CIDRs for destination IP addresses. + // If a destination IP matches any one CIDR, it will be selected. + DestinationCIDRs []CIDR `json:"destinationCIDRs,omitempty"` + + // ExternalVTEP is the remote VTEP outside Cilium network. + ExternalVTEP *ExternalVTEP `json:"externalVTEP,omitempty"` +} + +// External VTEP identifies the node outside cilium network that should act +// as a gateway for traffic matching the vtep policy +type ExternalVTEP struct { + // IP is the VTEP IP (remote node terminating VXLAN tunnel) + // + // Example: + // When set to "192.168.1.100", matching traffic will be + // redirected to the VXLAN tunnel towards IP address 192.168.1.100. + // + // +kubebuilder:validation:Format=ipv4 + IP string `json:"ip,omitempty"` + + // MAC is a remote MAC address on the other side of VXLAN tunnel. This is + // needed to build l2 and avoid ARP. + // + // Example: + // 00:11:22:33:44:55 that belongs to VXLAN tunnel interface on the remote side + MAC MAC `json:"mac,omitempty"` +} + +type CiliumVtepPolicyRules struct { + // Selects Namespaces using cluster-scoped labels. This field follows standard label + // selector semantics; if present but empty, it selects all namespaces. + NamespaceSelector *slimv1.LabelSelector `json:"namespaceSelector,omitempty"` + + // This is a label selector which selects Pods. This field follows standard label + // selector semantics; if present but empty, it selects all pods. + PodSelector *slimv1.LabelSelector `json:"podSelector,omitempty"` + + // This is a label selector which selects Pods by Node. This field follows standard label + // selector semantics; if present but empty, it selects all nodes. + NodeSelector *slimv1.LabelSelector `json:"nodeSelector,omitempty"` +} diff --git a/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepcopy.go b/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepcopy.go index 49f3813ae2a9c..68a1610455fe7 100644 --- a/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepcopy.go +++ b/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepcopy.go @@ -1973,6 +1973,130 @@ func (in *CiliumPodIPPoolList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumVtepPolicy) DeepCopyInto(out *CiliumVtepPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumVtepPolicy. +func (in *CiliumVtepPolicy) DeepCopy() *CiliumVtepPolicy { + if in == nil { + return nil + } + out := new(CiliumVtepPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
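
Before the generated deepcopy and deepequal helpers that follow, here is a sketch of what a policy built from the fields above might look like; the name, labels, CIDR, and addresses are illustrative only (the IP and MAC mirror the examples in the field comments):

```yaml
apiVersion: cilium.io/v2alpha1
kind: CiliumVtepPolicy
metadata:
  name: example-vtep-policy      # cluster-scoped, so no namespace
spec:
  selectors:
    - podSelector:
        matchLabels:
          app: legacy-client      # illustrative label
      namespaceSelector:
        matchLabels:
          env: prod               # illustrative label
  destinationCIDRs:
    - "10.10.0.0/16"              # illustrative destination CIDR
  externalVTEP:
    ip: "192.168.1.100"           # remote node terminating the VXLAN tunnel
    mac: "00:11:22:33:44:55"      # MAC of the remote VXLAN interface
```
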
+func (in *CiliumVtepPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumVtepPolicyList) DeepCopyInto(out *CiliumVtepPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CiliumVtepPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumVtepPolicyList. +func (in *CiliumVtepPolicyList) DeepCopy() *CiliumVtepPolicyList { + if in == nil { + return nil + } + out := new(CiliumVtepPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CiliumVtepPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumVtepPolicyRules) DeepCopyInto(out *CiliumVtepPolicyRules) { + *out = *in + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.PodSelector != nil { + in, out := &in.PodSelector, &out.PodSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumVtepPolicyRules. +func (in *CiliumVtepPolicyRules) DeepCopy() *CiliumVtepPolicyRules { + if in == nil { + return nil + } + out := new(CiliumVtepPolicyRules) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumVtepPolicySpec) DeepCopyInto(out *CiliumVtepPolicySpec) { + *out = *in + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]CiliumVtepPolicyRules, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DestinationCIDRs != nil { + in, out := &in.DestinationCIDRs, &out.DestinationCIDRs + *out = make([]CIDR, len(*in)) + copy(*out, *in) + } + if in.ExternalVTEP != nil { + in, out := &in.ExternalVTEP, &out.ExternalVTEP + *out = new(ExternalVTEP) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumVtepPolicySpec. +func (in *CiliumVtepPolicySpec) DeepCopy() *CiliumVtepPolicySpec { + if in == nil { + return nil + } + out := new(CiliumVtepPolicySpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CoreCiliumEndpoint) DeepCopyInto(out *CoreCiliumEndpoint) { *out = *in @@ -2032,6 +2156,22 @@ func (in *EgressRule) DeepCopy() *EgressRule { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExternalVTEP) DeepCopyInto(out *ExternalVTEP) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalVTEP. +func (in *ExternalVTEP) DeepCopy() *ExternalVTEP { + if in == nil { + return nil + } + out := new(ExternalVTEP) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IPPoolSpec) DeepCopyInto(out *IPPoolSpec) { *out = *in diff --git a/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepequal.go b/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepequal.go index 02bffc6092456..56929b177ea14 100644 --- a/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepequal.go +++ b/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepequal.go @@ -1546,6 +1546,106 @@ func (in *CiliumPodIPPool) DeepEqual(other *CiliumPodIPPool) bool { return true } +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumVtepPolicy) DeepEqual(other *CiliumVtepPolicy) bool { + if other == nil { + return false + } + + if !in.Spec.DeepEqual(&other.Spec) { + return false + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumVtepPolicyRules) DeepEqual(other *CiliumVtepPolicyRules) bool { + if other == nil { + return false + } + + if (in.NamespaceSelector == nil) != (other.NamespaceSelector == nil) { + return false + } else if in.NamespaceSelector != nil { + if !in.NamespaceSelector.DeepEqual(other.NamespaceSelector) { + return false + } + } + + if (in.PodSelector == nil) != (other.PodSelector == nil) { + return false + } else if in.PodSelector != nil { + if !in.PodSelector.DeepEqual(other.PodSelector) { + return false + } + } + + if (in.NodeSelector == nil) != (other.NodeSelector == nil) { + return false + } else if in.NodeSelector != nil { + if !in.NodeSelector.DeepEqual(other.NodeSelector) { + return false + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumVtepPolicySpec) DeepEqual(other *CiliumVtepPolicySpec) bool { + if other == nil { + return false + } + + if ((in.Selectors != nil) && (other.Selectors != nil)) || ((in.Selectors == nil) != (other.Selectors == nil)) { + in, other := &in.Selectors, &other.Selectors + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + if ((in.DestinationCIDRs != nil) && (other.DestinationCIDRs != nil)) || ((in.DestinationCIDRs == nil) != (other.DestinationCIDRs == nil)) { + in, other := &in.DestinationCIDRs, &other.DestinationCIDRs + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if inElement != (*other)[i] { + return false + } + } + } + } + + if (in.ExternalVTEP == nil) != (other.ExternalVTEP == nil) { + return false + } else if in.ExternalVTEP != nil { + if !in.ExternalVTEP.DeepEqual(other.ExternalVTEP) { + return false + } + } + + return true +} + // DeepEqual is an autogenerated deepequal function, deeply comparing the // receiver with other. in must be non-nil. 
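
The generated comparisons above are what callers would typically use to skip no-op updates; a minimal, hypothetical sketch of such a consumer (the reconcile callback is illustrative):

```go
package example // hypothetical consumer of the generated deepequal helpers

import (
	v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
)

// onVtepPolicyUpdate only triggers reconciliation when the spec actually
// changed, relying on the generated CiliumVtepPolicySpec.DeepEqual.
func onVtepPolicyUpdate(oldP, newP *v2alpha1.CiliumVtepPolicy, reconcile func(*v2alpha1.CiliumVtepPolicy)) {
	if oldP.Spec.DeepEqual(&newP.Spec) {
		return // spec unchanged, nothing to reconcile
	}
	reconcile(newP)
}
```
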
func (in *CoreCiliumEndpoint) DeepEqual(other *CoreCiliumEndpoint) bool { @@ -1611,6 +1711,23 @@ func (in *EgressRule) DeepEqual(other *EgressRule) bool { return true } +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *ExternalVTEP) DeepEqual(other *ExternalVTEP) bool { + if other == nil { + return false + } + + if in.IP != other.IP { + return false + } + if in.MAC != other.MAC { + return false + } + + return true +} + // DeepEqual is an autogenerated deepequal function, deeply comparing the // receiver with other. in must be non-nil. func (in *IPPoolSpec) DeepEqual(other *IPPoolSpec) bool { diff --git a/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/cilium.io_client.go b/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/cilium.io_client.go index a731d33f3445f..c517ac4a0d772 100644 --- a/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/cilium.io_client.go +++ b/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/cilium.io_client.go @@ -28,6 +28,7 @@ type CiliumV2alpha1Interface interface { CiliumLoadBalancerIPPoolsGetter CiliumNodeConfigsGetter CiliumPodIPPoolsGetter + CiliumVtepPoliciesGetter } // CiliumV2alpha1Client is used to interact with features provided by the cilium.io group. @@ -87,6 +88,10 @@ func (c *CiliumV2alpha1Client) CiliumPodIPPools() CiliumPodIPPoolInterface { return newCiliumPodIPPools(c) } +func (c *CiliumV2alpha1Client) CiliumVtepPolicies() CiliumVtepPolicyInterface { + return newCiliumVtepPolicies(c) +} + // NewForConfig creates a new CiliumV2alpha1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). diff --git a/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumvteppolicy.go b/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumvteppolicy.go new file mode 100644 index 0000000000000..9f588827b16b7 --- /dev/null +++ b/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumvteppolicy.go @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + context "context" + + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// CiliumVtepPoliciesGetter has a method to return a CiliumVtepPolicyInterface. +// A group's client should implement this interface. +type CiliumVtepPoliciesGetter interface { + CiliumVtepPolicies() CiliumVtepPolicyInterface +} + +// CiliumVtepPolicyInterface has methods to work with CiliumVtepPolicy resources. 
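
The full method set appears in the interface just below; as a sketch of how a caller might exercise it through the typed clientset (the policy object and error handling are illustrative):

```go
package example // hypothetical caller of the generated clientset

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
	versioned "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned"
)

func createAndList(ctx context.Context, cs versioned.Interface, policy *v2alpha1.CiliumVtepPolicy) error {
	// Create the cluster-scoped policy.
	if _, err := cs.CiliumV2alpha1().CiliumVtepPolicies().Create(ctx, policy, metav1.CreateOptions{}); err != nil {
		return err
	}
	// List all policies currently known to the API server.
	list, err := cs.CiliumV2alpha1().CiliumVtepPolicies().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	fmt.Println("policies:", len(list.Items))
	return nil
}
```
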
+type CiliumVtepPolicyInterface interface { + Create(ctx context.Context, ciliumVtepPolicy *ciliumiov2alpha1.CiliumVtepPolicy, opts v1.CreateOptions) (*ciliumiov2alpha1.CiliumVtepPolicy, error) + Update(ctx context.Context, ciliumVtepPolicy *ciliumiov2alpha1.CiliumVtepPolicy, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumVtepPolicy, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2alpha1.CiliumVtepPolicy, error) + List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2alpha1.CiliumVtepPolicyList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2alpha1.CiliumVtepPolicy, err error) + CiliumVtepPolicyExpansion +} + +// ciliumVtepPolicies implements CiliumVtepPolicyInterface +type ciliumVtepPolicies struct { + *gentype.ClientWithList[*ciliumiov2alpha1.CiliumVtepPolicy, *ciliumiov2alpha1.CiliumVtepPolicyList] +} + +// newCiliumVtepPolicies returns a CiliumVtepPolicies +func newCiliumVtepPolicies(c *CiliumV2alpha1Client) *ciliumVtepPolicies { + return &ciliumVtepPolicies{ + gentype.NewClientWithList[*ciliumiov2alpha1.CiliumVtepPolicy, *ciliumiov2alpha1.CiliumVtepPolicyList]( + "ciliumvteppolicies", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *ciliumiov2alpha1.CiliumVtepPolicy { return &ciliumiov2alpha1.CiliumVtepPolicy{} }, + func() *ciliumiov2alpha1.CiliumVtepPolicyList { return &ciliumiov2alpha1.CiliumVtepPolicyList{} }, + ), + } +} diff --git a/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_cilium.io_client.go b/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_cilium.io_client.go index ec3f3631dfef4..524fa3264fc40 100644 --- a/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_cilium.io_client.go +++ b/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_cilium.io_client.go @@ -67,6 +67,10 @@ func (c *FakeCiliumV2alpha1) CiliumPodIPPools() v2alpha1.CiliumPodIPPoolInterfac return newFakeCiliumPodIPPools(c) } +func (c *FakeCiliumV2alpha1) CiliumVtepPolicies() v2alpha1.CiliumVtepPolicyInterface { + return newFakeCiliumVtepPolicies(c) +} + // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. func (c *FakeCiliumV2alpha1) RESTClient() rest.Interface { diff --git a/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumvteppolicy.go b/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumvteppolicy.go new file mode 100644 index 0000000000000..a544adc09165c --- /dev/null +++ b/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumvteppolicy.go @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1" + gentype "k8s.io/client-go/gentype" +) + +// fakeCiliumVtepPolicies implements CiliumVtepPolicyInterface +type fakeCiliumVtepPolicies struct { + *gentype.FakeClientWithList[*v2alpha1.CiliumVtepPolicy, *v2alpha1.CiliumVtepPolicyList] + Fake *FakeCiliumV2alpha1 +} + +func newFakeCiliumVtepPolicies(fake *FakeCiliumV2alpha1) ciliumiov2alpha1.CiliumVtepPolicyInterface { + return &fakeCiliumVtepPolicies{ + gentype.NewFakeClientWithList[*v2alpha1.CiliumVtepPolicy, *v2alpha1.CiliumVtepPolicyList]( + fake.Fake, + "", + v2alpha1.SchemeGroupVersion.WithResource("ciliumvteppolicies"), + v2alpha1.SchemeGroupVersion.WithKind("CiliumVtepPolicy"), + func() *v2alpha1.CiliumVtepPolicy { return &v2alpha1.CiliumVtepPolicy{} }, + func() *v2alpha1.CiliumVtepPolicyList { return &v2alpha1.CiliumVtepPolicyList{} }, + func(dst, src *v2alpha1.CiliumVtepPolicyList) { dst.ListMeta = src.ListMeta }, + func(list *v2alpha1.CiliumVtepPolicyList) []*v2alpha1.CiliumVtepPolicy { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v2alpha1.CiliumVtepPolicyList, items []*v2alpha1.CiliumVtepPolicy) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, + } +} diff --git a/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/generated_expansion.go b/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/generated_expansion.go index 5a1d72f1f9ece..09505ad8bab81 100644 --- a/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/generated_expansion.go +++ b/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/generated_expansion.go @@ -30,3 +30,5 @@ type CiliumLoadBalancerIPPoolExpansion interface{} type CiliumNodeConfigExpansion interface{} type CiliumPodIPPoolExpansion interface{} + +type CiliumVtepPolicyExpansion interface{} diff --git a/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumvteppolicy.go b/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumvteppolicy.go new file mode 100644 index 0000000000000..b05aba8144792 --- /dev/null +++ b/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumvteppolicy.go @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by informer-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + context "context" + time "time" + + apisciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + versioned "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" + internalinterfaces "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/internalinterfaces" + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// CiliumVtepPolicyInformer provides access to a shared informer and lister for +// CiliumVtepPolicies. +type CiliumVtepPolicyInformer interface { + Informer() cache.SharedIndexInformer + Lister() ciliumiov2alpha1.CiliumVtepPolicyLister +} + +type ciliumVtepPolicyInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewCiliumVtepPolicyInformer constructs a new informer for CiliumVtepPolicy type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewCiliumVtepPolicyInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCiliumVtepPolicyInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredCiliumVtepPolicyInformer constructs a new informer for CiliumVtepPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredCiliumVtepPolicyInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CiliumV2alpha1().CiliumVtepPolicies().List(context.Background(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CiliumV2alpha1().CiliumVtepPolicies().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CiliumV2alpha1().CiliumVtepPolicies().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CiliumV2alpha1().CiliumVtepPolicies().Watch(ctx, options) + }, + }, + &apisciliumiov2alpha1.CiliumVtepPolicy{}, + resyncPeriod, + indexers, + ) +} + +func (f *ciliumVtepPolicyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCiliumVtepPolicyInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *ciliumVtepPolicyInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apisciliumiov2alpha1.CiliumVtepPolicy{}, f.defaultInformer) +} + +func (f *ciliumVtepPolicyInformer) Lister() ciliumiov2alpha1.CiliumVtepPolicyLister { + return ciliumiov2alpha1.NewCiliumVtepPolicyLister(f.Informer().GetIndexer()) +} diff --git a/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/interface.go b/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/interface.go index 77d88357ec4c7..ae965ed2cc84d 100644 --- a/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/interface.go +++ b/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/interface.go @@ -37,6 +37,8 @@ type Interface interface { CiliumNodeConfigs() CiliumNodeConfigInformer // CiliumPodIPPools returns a CiliumPodIPPoolInformer. CiliumPodIPPools() CiliumPodIPPoolInformer + // CiliumVtepPolicies returns a CiliumVtepPolicyInformer. 
+ CiliumVtepPolicies() CiliumVtepPolicyInformer } type version struct { @@ -114,3 +116,8 @@ func (v *version) CiliumNodeConfigs() CiliumNodeConfigInformer { func (v *version) CiliumPodIPPools() CiliumPodIPPoolInformer { return &ciliumPodIPPoolInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } + +// CiliumVtepPolicies returns a CiliumVtepPolicyInformer. +func (v *version) CiliumVtepPolicies() CiliumVtepPolicyInformer { + return &ciliumVtepPolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/pkg/k8s/client/informers/externalversions/generic.go b/pkg/k8s/client/informers/externalversions/generic.go index dba5af24c7b8b..a873198bde408 100644 --- a/pkg/k8s/client/informers/externalversions/generic.go +++ b/pkg/k8s/client/informers/externalversions/generic.go @@ -103,6 +103,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Cilium().V2alpha1().CiliumNodeConfigs().Informer()}, nil case v2alpha1.SchemeGroupVersion.WithResource("ciliumpodippools"): return &genericInformer{resource: resource.GroupResource(), informer: f.Cilium().V2alpha1().CiliumPodIPPools().Informer()}, nil + case v2alpha1.SchemeGroupVersion.WithResource("ciliumvteppolicies"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Cilium().V2alpha1().CiliumVtepPolicies().Informer()}, nil } diff --git a/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumvteppolicy.go b/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumvteppolicy.go new file mode 100644 index 0000000000000..325977432416a --- /dev/null +++ b/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumvteppolicy.go @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by lister-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// CiliumVtepPolicyLister helps list CiliumVtepPolicies. +// All objects returned here must be treated as read-only. +type CiliumVtepPolicyLister interface { + // List lists all CiliumVtepPolicies in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*ciliumiov2alpha1.CiliumVtepPolicy, err error) + // Get retrieves the CiliumVtepPolicy from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*ciliumiov2alpha1.CiliumVtepPolicy, error) + CiliumVtepPolicyListerExpansion +} + +// ciliumVtepPolicyLister implements the CiliumVtepPolicyLister interface. +type ciliumVtepPolicyLister struct { + listers.ResourceIndexer[*ciliumiov2alpha1.CiliumVtepPolicy] +} + +// NewCiliumVtepPolicyLister returns a new CiliumVtepPolicyLister. 
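
The lister constructor follows just below; wired through the shared informer factory, the new resource can be consumed like any other Cilium CRD. A hypothetical sketch (the resync period is illustrative):

```go
package example // hypothetical consumer of the generated informer and lister

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"

	versioned "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned"
	externalversions "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions"
)

func listVtepPolicies(cs versioned.Interface, stop <-chan struct{}) error {
	factory := externalversions.NewSharedInformerFactory(cs, 30*time.Second)

	// Fetching the lister also instantiates the underlying shared informer,
	// registering it with the factory before Start.
	lister := factory.Cilium().V2alpha1().CiliumVtepPolicies().Lister()

	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	policies, err := lister.List(labels.Everything())
	if err != nil {
		return err
	}
	fmt.Println("cached policies:", len(policies))
	return nil
}
```
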
+func NewCiliumVtepPolicyLister(indexer cache.Indexer) CiliumVtepPolicyLister { + return &ciliumVtepPolicyLister{listers.New[*ciliumiov2alpha1.CiliumVtepPolicy](indexer, ciliumiov2alpha1.Resource("ciliumvteppolicy"))} +} diff --git a/pkg/k8s/client/listers/cilium.io/v2alpha1/expansion_generated.go b/pkg/k8s/client/listers/cilium.io/v2alpha1/expansion_generated.go index fde475b7392a9..978e795047212 100644 --- a/pkg/k8s/client/listers/cilium.io/v2alpha1/expansion_generated.go +++ b/pkg/k8s/client/listers/cilium.io/v2alpha1/expansion_generated.go @@ -60,3 +60,7 @@ type CiliumNodeConfigNamespaceListerExpansion interface{} // CiliumPodIPPoolListerExpansion allows custom methods to be added to // CiliumPodIPPoolLister. type CiliumPodIPPoolListerExpansion interface{} + +// CiliumVtepPolicyListerExpansion allows custom methods to be added to +// CiliumVtepPolicyLister. +type CiliumVtepPolicyListerExpansion interface{} diff --git a/pkg/k8s/client/testutils/fake.go b/pkg/k8s/client/testutils/fake.go index 4f716f7e37492..310bcf04a3348 100644 --- a/pkg/k8s/client/testutils/fake.go +++ b/pkg/k8s/client/testutils/fake.go @@ -31,7 +31,6 @@ import ( mcsapi_fake "sigs.k8s.io/mcs-api/pkg/client/clientset/versioned/fake" k8sYaml "sigs.k8s.io/yaml" - "github.com/cilium/cilium/pkg/container" k8sclient "github.com/cilium/cilium/pkg/k8s/client" cilium_fake "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake" slim_clientset "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned" @@ -77,6 +76,8 @@ type FakeClientset struct { *APIExtFakeClientset k8sclient.ClientsetGetters + ot *statedbObjectTracker + SlimFakeClientset *SlimFakeClientset trackers []struct { @@ -156,6 +157,7 @@ func NewFakeClientsetWithVersion(log *slog.Logger, ot *statedbObjectTracker, ver client.SlimFakeClientset.Resources = resources client.CiliumFakeClientset.Resources = resources client.APIExtFakeClientset.Resources = resources + client.ot = ot otx := ot.For("*", testutils.Scheme, testutils.Decoder()) prependReactors(client.SlimFakeClientset, otx) @@ -238,9 +240,6 @@ func showGVR(gvr schema.GroupVersionResource) string { } func FakeClientCommands(fc *FakeClientset) map[string]script.Cmd { - // Use a InsertOrderedMap to keep e.g. k8s/summary output stable. 
- seenResources := container.NewInsertOrderedMap[schema.GroupVersionKind, schema.GroupVersionResource]() - addUpdateOrDelete := func(s *script.State, action string, files []string) error { for _, file := range files { b, err := os.ReadFile(s.Path(file)) @@ -261,7 +260,6 @@ func FakeClientCommands(fc *FakeClientset) map[string]script.Cmd { if err != nil { return fmt.Errorf("accessor: %w", err) } - seenResources.Insert(*gvk, gvr) name := objMeta.GetName() ns := objMeta.GetNamespace() @@ -373,14 +371,14 @@ func FakeClientCommands(fc *FakeClientset) map[string]script.Cmd { } var gvr schema.GroupVersionResource - for _, r := range seenResources.All() { - res := showGVR(r) + for gvrk := range fc.ot.getGVRKs() { + res := showGVR(gvrk.GroupVersionResource) if res == args[0] { - gvr = r + gvr = gvrk.GroupVersionResource break } else if strings.Contains(res, args[0]) { s.Logf("Using closest match %q\n", res) - gvr = r + gvr = gvrk.GroupVersionResource break } } @@ -435,16 +433,16 @@ func FakeClientCommands(fc *FakeClientset) map[string]script.Cmd { var gvr schema.GroupVersionResource var gvk schema.GroupVersionKind - for k, r := range seenResources.All() { - res := showGVR(r) + for gvrk := range fc.ot.getGVRKs() { + res := showGVR(gvrk.GroupVersionResource) if res == args[0] { - gvr = r - gvk = k + gvr = gvrk.GroupVersionResource + gvk = gvrk.groupVersionKind() break } else if strings.Contains(res, args[0]) { s.Logf("Using closest match %q\n", res) - gvr = r - gvk = k + gvr = gvrk.GroupVersionResource + gvk = gvrk.groupVersionKind() break } } @@ -490,11 +488,11 @@ func FakeClientCommands(fc *FakeClientset) map[string]script.Cmd { } for _, tc := range fc.trackers { fmt.Fprintf(out, "%s:\n", tc.domain) - for gvk, gvr := range seenResources.All() { - objs, err := tc.tracker.List(gvr, gvk, "") + for gvrk := range fc.ot.getGVRKs() { + objs, err := tc.tracker.List(gvrk.GroupVersionResource, gvrk.groupVersionKind(), "") if err == nil { lst, _ := meta.ExtractList(objs) - fmt.Fprintf(out, "- %s: %d\n", showGVR(gvr), len(lst)) + fmt.Fprintf(out, "- %s: %d\n", showGVR(gvrk.GroupVersionResource), len(lst)) } } } @@ -509,8 +507,8 @@ func FakeClientCommands(fc *FakeClientset) map[string]script.Cmd { func(s *script.State, args ...string) (script.WaitFunc, error) { return func(s *script.State) (stdout string, stderr string, err error) { var buf strings.Builder - for _, gvr := range seenResources.All() { - fmt.Fprintf(&buf, "%s\n", showGVR(gvr)) + for gvrk := range fc.ot.getGVRKs() { + fmt.Fprintf(&buf, "%s\n", showGVR(gvrk.GroupVersionResource)) } stdout = buf.String() return diff --git a/pkg/k8s/client/testutils/object_tracker.go b/pkg/k8s/client/testutils/object_tracker.go index 9cc490647729e..d97edda5eb130 100644 --- a/pkg/k8s/client/testutils/object_tracker.go +++ b/pkg/k8s/client/testutils/object_tracker.go @@ -6,6 +6,7 @@ package testutils import ( "encoding/json" "fmt" + "iter" "log/slog" "reflect" "strconv" @@ -28,6 +29,7 @@ import ( "k8s.io/client-go/testing" "k8s.io/client-go/util/jsonpath" + "github.com/cilium/cilium/pkg/container" "github.com/cilium/cilium/pkg/k8s/testutils" "github.com/cilium/cilium/pkg/logging/logfields" ) @@ -72,6 +74,7 @@ func newStateDBObjectTracker(db *statedb.DB, log *slog.Logger) (*statedbObjectTr type object struct { objectId + kind string deleted bool o runtime.Object } @@ -132,6 +135,30 @@ var ( } ) +type gvrk struct { + schema.GroupVersionResource + kind string +} + +func (g gvrk) groupVersionKind() schema.GroupVersionKind { + return schema.GroupVersionKind{ + Group: 
g.Group, + Version: g.Version, + Kind: g.kind, + } +} + +func (s *statedbObjectTracker) getGVRKs() iter.Seq[gvrk] { + rs := container.NewInsertOrderedMap[schema.GroupVersionResource, gvrk]() + for obj := range s.tbl.All(s.db.ReadTxn()) { + rs.Insert(obj.gvr, gvrk{ + GroupVersionResource: obj.gvr, + kind: obj.kind, + }) + } + return rs.Values() +} + // For returns a object tracker for a specific use-case (domain) that is separate from others. func (s *statedbObjectTracker) For(domain string, scheme *runtime.Scheme, decoder runtime.Decoder) *statedbObjectTracker { o := *s @@ -162,6 +189,16 @@ func (s *statedbObjectTracker) addList(obj runtime.Object) error { return nil } +// fillTypeMetaIfNeeded sets the [metav1.TypeMeta] in the object if it's not already set based +// on the GroupVersionKind found from the schema. +func fillTypeMetaIfNeeded(obj runtime.Object, gvk schema.GroupVersionKind) runtime.Object { + if obj.GetObjectKind().GroupVersionKind().Empty() { + obj = obj.DeepCopyObject() + obj.GetObjectKind().SetGroupVersionKind(gvk) + } + return obj +} + // Add adds an object to the tracker. If object being added // is a list, its items are added separately. func (s *statedbObjectTracker) Add(obj runtime.Object) error { @@ -188,15 +225,12 @@ func (s *statedbObjectTracker) Add(obj runtime.Object) error { return err } - if partial, ok := obj.(*metav1.PartialObjectMetadata); ok && len(partial.TypeMeta.APIVersion) > 0 { - gvks = []schema.GroupVersionKind{partial.TypeMeta.GroupVersionKind()} - } - if len(gvks) == 0 { err := fmt.Errorf("no registered kinds for %v", obj) s.log.Debug("Add", logfields.Error, err) return err } + for _, gvk := range gvks { // NOTE: UnsafeGuessKindToResource is a heuristic and default match. The // actual registration in apiserver can specify arbitrary route for a @@ -209,6 +243,8 @@ func (s *statedbObjectTracker) Add(obj runtime.Object) error { gvr.Version = "" } + obj = fillTypeMetaIfNeeded(obj, gvk) + s.log.Debug( "Add", logfieldClientset, s.domain, @@ -219,7 +255,9 @@ func (s *statedbObjectTracker) Add(obj runtime.Object) error { s.tbl.Insert(wtxn, object{ objectId: newObjectId(s.domain, gvr, objMeta.GetNamespace(), objMeta.GetName()), - o: obj}) + o: obj, + kind: gvk.Kind, + }) } return nil } @@ -282,6 +320,18 @@ func (s *statedbObjectTracker) Create(gvr schema.GroupVersionResource, obj runti logfieldClientset, s.domain, logfields.Object, obj) + gvks, _, err := s.scheme.ObjectKinds(obj) + if err != nil { + s.log.Debug("Create", logfields.Error, err) + return err + } + if len(gvks) == 0 { + err = fmt.Errorf("no kind found for %+v", gvr) + s.log.Debug("Create", logfields.Error, err) + return err + } + gvk := gvks[0] + obj = obj.DeepCopyObject() newMeta, err := meta.Accessor(obj) if err != nil { @@ -291,12 +341,16 @@ func (s *statedbObjectTracker) Create(gvr schema.GroupVersionResource, obj runti newMeta.SetNamespace(ns) } + obj = fillTypeMetaIfNeeded(obj, gvks[0]) + wtxn := s.db.WriteTxn(s.tbl) version := s.tbl.Revision(wtxn) + 1 newMeta.SetResourceVersion(strconv.FormatUint(version, 10)) old, found, _ := s.tbl.Insert(wtxn, object{ objectId: newObjectId(s.domain, gvr, ns, newMeta.GetName()), - o: obj}) + o: obj, + kind: gvk.Kind, + }) if found && !old.deleted { wtxn.Abort() gr := gvr.GroupResource() @@ -370,6 +424,9 @@ func (s *statedbObjectTracker) List(gvr schema.GroupVersionResource, gvk schema. 
} list, err := s.scheme.New(listGVK) + if err != nil { + list, err = testutils.KubernetesScheme.New(listGVK) + } if err != nil { return nil, err } @@ -446,6 +503,18 @@ func (s *statedbObjectTracker) Update(gvr schema.GroupVersionResource, obj runti } func (s *statedbObjectTracker) updateOrPatch(what string, gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.UpdateOptions) error { + gvks, _, err := s.scheme.ObjectKinds(obj) + if err != nil { + s.log.Debug(what, logfields.Error, err) + return err + } + if len(gvks) == 0 { + err = fmt.Errorf("no kind found for %+v", gvr) + s.log.Debug(what, logfields.Error, err) + return err + } + gvk := gvks[0] + obj = obj.DeepCopyObject() newMeta, err := meta.Accessor(obj) if err != nil { @@ -455,6 +524,9 @@ func (s *statedbObjectTracker) updateOrPatch(what string, gvr schema.GroupVersio if len(newMeta.GetNamespace()) == 0 { newMeta.SetNamespace(ns) } + + obj = fillTypeMetaIfNeeded(obj, gvks[0]) + wtxn := s.db.WriteTxn(s.tbl) version := s.tbl.Revision(wtxn) + 1 newMeta.SetResourceVersion(strconv.FormatUint(version, 10)) @@ -464,7 +536,9 @@ func (s *statedbObjectTracker) updateOrPatch(what string, gvr schema.GroupVersio logfields.Object, obj, logfieldResourceVersion, version) - oldObj, found, _ := s.tbl.Insert(wtxn, object{objectId: newObjectId(s.domain, gvr, ns, newMeta.GetName()), o: obj}) + oldObj, found, _ := s.tbl.Insert(wtxn, + object{objectId: newObjectId(s.domain, gvr, ns, newMeta.GetName()), o: obj, kind: gvk.Kind}, + ) if !found || oldObj.deleted { wtxn.Abort() gr := gvr.GroupResource() diff --git a/pkg/k8s/client/testutils/object_tracker_test.go b/pkg/k8s/client/testutils/object_tracker_test.go new file mode 100644 index 0000000000000..a6e11bb75a450 --- /dev/null +++ b/pkg/k8s/client/testutils/object_tracker_test.go @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package testutils + +import ( + "log/slog" + "testing" + + "github.com/cilium/hive/hivetest" + "github.com/cilium/statedb" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + ciliumv2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" + slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" +) + +func TestStateDBObjectTracker_fillTypeMeta(t *testing.T) { + db := statedb.New() + log := hivetest.Logger(t, hivetest.LogLevel(slog.LevelDebug)) + + ot, err := newStateDBObjectTracker(db, log) + require.NoError(t, err) + + gvr := schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "nodes", + } + + // A node without the TypeMeta. 
+ node := v1.Node{ + ObjectMeta: slim_metav1.ObjectMeta{Name: "test1"}, + } + + // Add() an object + err = ot.Add(node.DeepCopy()) + require.NoError(t, err, "Add") + + nodeAny, err := ot.Get(gvr, "", "test1") + require.NoError(t, err) + n := nodeAny.(*v1.Node) + require.Equal(t, "Node", n.TypeMeta.Kind) + require.Equal(t, "v1", n.TypeMeta.APIVersion) + + node.Name = "test2" + + err = ot.Create(gvr, node.DeepCopy(), "") + require.NoError(t, err, "Create") + nodeAny, err = ot.Get(gvr, "", "test2") + require.NoError(t, err) + n = nodeAny.(*v1.Node) + require.Equal(t, "Node", n.TypeMeta.Kind) + require.Equal(t, "v1", n.TypeMeta.APIVersion) + + err = ot.Update(gvr, node.DeepCopy(), "") + require.NoError(t, err, "Update") + nodeAny, err = ot.Get(gvr, "", "test2") + require.NoError(t, err) + n = nodeAny.(*v1.Node) + require.Equal(t, "Node", n.TypeMeta.Kind) + require.Equal(t, "v1", n.TypeMeta.APIVersion) + + // A cilium node without the TypeMeta. This tests that the + // APIVersion is correctly set when Group is non-empty. + ciliumNode := ciliumv2.CiliumNode{ + ObjectMeta: metav1.ObjectMeta{Name: "test1"}, + } + + gvr = schema.GroupVersionResource{ + Group: "cilium.io", + Version: "v2", + Resource: "ciliumnodes", + } + err = ot.Add(ciliumNode.DeepCopy()) + require.NoError(t, err, "Add") + + ciliumNodeAny, err := ot.Get(gvr, "", "test1") + require.NoError(t, err) + cn := ciliumNodeAny.(*ciliumv2.CiliumNode) + require.Equal(t, "CiliumNode", cn.TypeMeta.Kind) + require.Equal(t, "cilium.io/v2", cn.TypeMeta.APIVersion) + +} diff --git a/pkg/k8s/client/testutils/script_test.go b/pkg/k8s/client/testutils/script_test.go index b4d8f5290ec88..f4be06669f55e 100644 --- a/pkg/k8s/client/testutils/script_test.go +++ b/pkg/k8s/client/testutils/script_test.go @@ -9,14 +9,17 @@ import ( "maps" "testing" + "github.com/cilium/hive/cell" "github.com/cilium/hive/hivetest" "github.com/cilium/hive/script" "github.com/cilium/hive/script/scripttest" "github.com/spf13/pflag" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/cilium/cilium/pkg/hive" + v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" "github.com/cilium/cilium/pkg/time" ) @@ -32,6 +35,19 @@ func TestScript(t *testing.T) { func(t testing.TB, args []string) *script.Engine { h := hive.New( FakeClientCell(), + + // Also add an object through the clientset interface to check that it can be seen and retrieved + // using the k8s commands. + cell.Invoke(func(cs *FakeClientset) error { + _, err := cs.CiliumFakeClientset.CiliumV2().CiliumNodes().Create( + ctx, + &v2.CiliumNode{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + }, + metav1.CreateOptions{}, + ) + return err + }), ) flags := pflag.NewFlagSet("", pflag.ContinueOnError) h.RegisterFlags(flags) diff --git a/pkg/k8s/client/testutils/testdata/fake.txtar b/pkg/k8s/client/testutils/testdata/fake.txtar index c279a4e604cf9..f17cbe2e3810a 100644 --- a/pkg/k8s/client/testutils/testdata/fake.txtar +++ b/pkg/k8s/client/testutils/testdata/fake.txtar @@ -5,6 +5,11 @@ hive/start +# Test that we can retrieve the CiliumNode object added via [Clientset] in the invoke +# of the test runner. +k8s/get ciliumnodes test -o actual.yaml +! 
empty actual.yaml + # Add object for the Kubernetes and Slim clientsets k8s/add service.yaml k8s/update service.yaml @@ -60,6 +65,7 @@ ID *;/v1, Resource=services;test/echo *v1.Service false *;apiextensions.k8s.io/v1, Resource=customresourcedefinitions;/ciliumenvoyconfigs.cilium.io *v1.CustomResourceDefinition false *;cilium.io/v2, Resource=ciliumenvoyconfigs;default/cec *v2.CiliumEnvoyConfig false +*;cilium.io/v2, Resource=ciliumnodes;/test *v2.CiliumNode false *;multicluster.x-k8s.io/v1alpha1, Resource=serviceexports;/test *v1alpha1.ServiceExport false k8s;/v1, Resource=limitranges;bar/foo *v1.LimitRange false k8s;/v1, Resource=services;test/echo *v1.Service false @@ -69,6 +75,7 @@ ID *;/v1, Resource=services;test/echo true *;apiextensions.k8s.io/v1, Resource=customresourcedefinitions;/ciliumenvoyconfigs.cilium.io true *;cilium.io/v2, Resource=ciliumenvoyconfigs;default/cec true +*;cilium.io/v2, Resource=ciliumnodes;/test false *;multicluster.x-k8s.io/v1alpha1, Resource=serviceexports;/test true k8s;/v1, Resource=limitranges;bar/foo true k8s;/v1, Resource=services;test/echo true @@ -76,18 +83,22 @@ k8s;/v1, Resource=services;test/echo -- summary.expected -- *: - v1.services: 1 -- cilium.io.v2.ciliumenvoyconfigs: 1 - apiextensions.k8s.io.v1.customresourcedefinitions: 1 +- cilium.io.v2.ciliumenvoyconfigs: 1 +- cilium.io.v2.ciliumnodes: 1 - multicluster.x-k8s.io.v1alpha1.serviceexports: 1 +- v1.limitranges: 0 k8s: - v1.services: 1 - v1.limitranges: 1 -- summary.empty -- *: - v1.services: 0 -- cilium.io.v2.ciliumenvoyconfigs: 0 - apiextensions.k8s.io.v1.customresourcedefinitions: 0 +- cilium.io.v2.ciliumenvoyconfigs: 0 +- cilium.io.v2.ciliumnodes: 1 - multicluster.x-k8s.io.v1alpha1.serviceexports: 0 +- v1.limitranges: 0 k8s: - v1.services: 0 - v1.limitranges: 0 diff --git a/pkg/k8s/synced/crd.go b/pkg/k8s/synced/crd.go index 6ba9f43810960..c2c217f8d3ae8 100644 --- a/pkg/k8s/synced/crd.go +++ b/pkg/k8s/synced/crd.go @@ -91,6 +91,10 @@ func agentCRDResourceNames() []string { CRDResourceName(v2alpha1.L2AnnouncementName), ) + if option.Config.EnableVTEP { + result = append(result, CRDResourceName(v2alpha1.CVPName)) + } + return result } diff --git a/pkg/k8s/watchers/pod.go b/pkg/k8s/watchers/pod.go index db701e18e0157..89ca8b1a5aa07 100644 --- a/pkg/k8s/watchers/pod.go +++ b/pkg/k8s/watchers/pod.go @@ -51,6 +51,7 @@ import ( "github.com/cilium/cilium/pkg/policy" "github.com/cilium/cilium/pkg/source" "github.com/cilium/cilium/pkg/time" + ciliumTypes "github.com/cilium/cilium/pkg/types" "github.com/cilium/cilium/pkg/u8proto" wgTypes "github.com/cilium/cilium/pkg/wireguard/types" @@ -67,40 +68,42 @@ type k8sPodWatcherParams struct { K8sEventReporter *K8sEventReporter - Clientset k8sClient.Clientset - Resources agentK8s.Resources - K8sResourceSynced *k8sSynced.Resources - K8sAPIGroups *k8sSynced.APIGroups - EndpointManager endpointmanager.EndpointManager - PolicyUpdater *policy.Updater - IPCache *ipcache.IPCache - DB *statedb.DB - Pods statedb.Table[agentK8s.LocalPod] - NodeAddrs statedb.Table[datapathTables.NodeAddress] - CGroupManager cgroup.CGroupManager - LBConfig loadbalancer.Config - WgConfig wgTypes.WireguardConfig - IPSecConfig datapath.IPsecConfig + Clientset k8sClient.Clientset + Resources agentK8s.Resources + K8sResourceSynced *k8sSynced.Resources + K8sAPIGroups *k8sSynced.APIGroups + EndpointManager endpointmanager.EndpointManager + PolicyUpdater *policy.Updater + IPCache *ipcache.IPCache + DB *statedb.DB + Pods statedb.Table[agentK8s.LocalPod] + NodeAddrs 
statedb.Table[datapathTables.NodeAddress] + CGroupManager cgroup.CGroupManager + LBConfig loadbalancer.Config + WgConfig wgTypes.WireguardConfig + IPSecConfig datapath.IPsecConfig + HostNetworkManager datapath.IptablesManager } func newK8sPodWatcher(params k8sPodWatcherParams) *K8sPodWatcher { return &K8sPodWatcher{ - logger: params.Logger, - clientset: params.Clientset, - k8sEventReporter: params.K8sEventReporter, - k8sResourceSynced: params.K8sResourceSynced, - k8sAPIGroups: params.K8sAPIGroups, - endpointManager: params.EndpointManager, - policyManager: params.PolicyUpdater, - ipcache: params.IPCache, - cgroupManager: params.CGroupManager, - resources: params.Resources, - db: params.DB, - pods: params.Pods, - nodeAddrs: params.NodeAddrs, - lbConfig: params.LBConfig, - wgConfig: params.WgConfig, - ipsecConfig: params.IPSecConfig, + logger: params.Logger, + clientset: params.Clientset, + k8sEventReporter: params.K8sEventReporter, + k8sResourceSynced: params.K8sResourceSynced, + k8sAPIGroups: params.K8sAPIGroups, + endpointManager: params.EndpointManager, + policyManager: params.PolicyUpdater, + ipcache: params.IPCache, + cgroupManager: params.CGroupManager, + resources: params.Resources, + db: params.DB, + pods: params.Pods, + nodeAddrs: params.NodeAddrs, + lbConfig: params.LBConfig, + wgConfig: params.WgConfig, + ipsecConfig: params.IPSecConfig, + hostNetworkManager: params.HostNetworkManager, controllersStarted: make(chan struct{}), } @@ -118,18 +121,19 @@ type K8sPodWatcher struct { k8sResourceSynced *k8sSynced.Resources // k8sAPIGroups is a set of k8s API in use. They are setup in watchers, // and may be disabled while the agent runs. - k8sAPIGroups *k8sSynced.APIGroups - endpointManager endpointManager - policyManager policyManager - ipcache ipcacheManager - cgroupManager cgroupManager - resources agentK8s.Resources - db *statedb.DB - pods statedb.Table[agentK8s.LocalPod] - nodeAddrs statedb.Table[datapathTables.NodeAddress] - lbConfig loadbalancer.Config - wgConfig wgTypes.WireguardConfig - ipsecConfig datapath.IPsecConfig + k8sAPIGroups *k8sSynced.APIGroups + endpointManager endpointManager + policyManager policyManager + ipcache ipcacheManager + cgroupManager cgroupManager + resources agentK8s.Resources + db *statedb.DB + pods statedb.Table[agentK8s.LocalPod] + nodeAddrs statedb.Table[datapathTables.NodeAddress] + lbConfig loadbalancer.Config + wgConfig wgTypes.WireguardConfig + ipsecConfig datapath.IPsecConfig + hostNetworkManager hostNetworkManager // controllersStarted is a channel that is closed when all watchers that do not depend on // local node configuration have been started @@ -236,6 +240,16 @@ func (k *K8sPodWatcher) addK8sPodV1(pod *slim_corev1.Pod) error { return err } + hostPorts, ok := annotation.Get(pod, annotation.NoTrackHostPorts) + if ok && !pod.Spec.HostNetwork { + scopedLog.Warn(fmt.Sprintf("%s annotation present but pod does not have hostNetwork: true. 
ignoring", annotation.NoTrackHostPorts)) + } + + if pod.Spec.HostNetwork { + hostPorts = strings.ReplaceAll(hostPorts, " ", "") + k.hostNetworkManager.AddNoTrackHostPorts(pod.Namespace, pod.Name, strings.Split(hostPorts, ",")) + } + if pod.Spec.HostNetwork && !option.Config.EnableLocalRedirectPolicy { scopedLog.Debug("Skip pod event using host networking") return err @@ -285,6 +299,16 @@ func (k *K8sPodWatcher) updateK8sPodV1(oldK8sPod, newK8sPod *slim_corev1.Pod) er return err } + hostPorts, ok := annotation.Get(newK8sPod, annotation.NoTrackHostPorts) + if ok && !newK8sPod.Spec.HostNetwork { + scopedLog.Warn(fmt.Sprintf("%s annotation present but pod does not have hostNetwork: true. ignoring", annotation.NoTrackHostPorts)) + } + + if newK8sPod.Spec.HostNetwork { + hostPorts = strings.ReplaceAll(hostPorts, " ", "") + k.hostNetworkManager.AddNoTrackHostPorts(newK8sPod.Namespace, newK8sPod.Name, strings.Split(hostPorts, ",")) + } + if newK8sPod.Spec.HostNetwork && !option.Config.EnableLocalRedirectPolicy && !option.Config.EnableSocketLBTracing { scopedLog.Debug("Skip pod event using host networking") @@ -480,6 +504,10 @@ func (k *K8sPodWatcher) deleteK8sPodV1(pod *slim_corev1.Pod) error { logfields.HostIP, pod.Status.HostIP, ) + if pod.Spec.HostNetwork { + k.hostNetworkManager.RemoveNoTrackHostPorts(pod.Namespace, pod.Name) + } + k.cgroupManager.OnDeletePod(pod) skipped, err := k.deletePodHostData(pod) diff --git a/pkg/k8s/watchers/watcher.go b/pkg/k8s/watchers/watcher.go index c3569904ac62f..f5ee2f2b8e872 100644 --- a/pkg/k8s/watchers/watcher.go +++ b/pkg/k8s/watchers/watcher.go @@ -88,6 +88,11 @@ type ipcacheManager interface { DeleteOnMetadataMatch(IP string, source source.Source, namespace, name string) (namedPortsChanged bool) } +type hostNetworkManager interface { + AddNoTrackHostPorts(namespace, name string, ports []string) + RemoveNoTrackHostPorts(namespace, name string) +} + type K8sWatcher struct { logger *slog.Logger resourceGroupsFn func(logger *slog.Logger, cfg WatcherConfiguration) (resourceGroups, waitForCachesOnly []string) @@ -200,6 +205,7 @@ var ciliumResourceToGroupMapping = map[string]watcherInfo{ synced.CRDResourceName(cilium_v2.CCGName): {waitOnly, k8sAPIGroupCiliumCIDRGroupV2}, synced.CRDResourceName(v2alpha1.L2AnnouncementName): {skip, ""}, // Handled by L2 announcement directly synced.CRDResourceName(v2alpha1.CPIPName): {skip, ""}, // Handled by multi-pool IPAM allocator + synced.CRDResourceName(v2alpha1.CVPName): {skip, ""}, // Handled by vtep policy manager } func GetGroupsForCiliumResources(logger *slog.Logger, ciliumResources []string) ([]string, []string) { diff --git a/pkg/labels/labels.go b/pkg/labels/labels.go index de49863dfc1ce..b7a6394db007a 100644 --- a/pkg/labels/labels.go +++ b/pkg/labels/labels.go @@ -868,15 +868,3 @@ func parseSelectLabel(str string, delim byte) Label { return lbl } - -// generateLabelString generates the string representation of a label with -// the provided source, key, and value in the format "source:key=value". -func generateLabelString(source, key, value string) string { - return source + ":" + key + "=" + value -} - -// GenerateK8sLabelString generates the string representation of a label with -// the provided source, key, and value in the format "LabelSourceK8s:key=value". 
-func GenerateK8sLabelString(k, v string) string { - return generateLabelString(LabelSourceK8s, k, v) -} diff --git a/pkg/labels/labels_test.go b/pkg/labels/labels_test.go index d423a94bbe331..9dd9fb264e2b7 100644 --- a/pkg/labels/labels_test.go +++ b/pkg/labels/labels_test.go @@ -542,14 +542,6 @@ func BenchmarkLabel_String(b *testing.B) { } } -func BenchmarkGenerateLabelString(b *testing.B) { - b.ReportAllocs() - - for b.Loop() { - generateLabelString("foo", "key", "value") - } -} - func TestLabel_String(t *testing.T) { // with value l := NewLabel("io.kubernetes.pod.namespace", "kube-system", LabelSourceK8s) diff --git a/pkg/loadbalancer/backend.go b/pkg/loadbalancer/backend.go index cd77ebaf2fcdd..9cb975c844e6f 100644 --- a/pkg/loadbalancer/backend.go +++ b/pkg/loadbalancer/backend.go @@ -22,6 +22,8 @@ const ( ) // BackendParams defines the parameters of a backend for insertion into the backends table. +// +deepequal-gen=true +// +deepequal-gen:private-method=true type BackendParams struct { Address L3n4Addr @@ -55,6 +57,7 @@ type BackendParams struct { Unhealthy bool // UnhealthyUpdatedAt is the timestamp for when [Unhealthy] was last updated. + // +deepequal-gen=false UnhealthyUpdatedAt *time.Time } @@ -78,8 +81,21 @@ func (bep *BackendParams) GetZone() string { return bep.Zone.Zone } +func (bep *BackendParams) GetUnhealthyUpdatedAt() time.Time { + if bep.UnhealthyUpdatedAt == nil { + return time.Time{} + } + return *bep.UnhealthyUpdatedAt +} + +func (bep *BackendParams) DeepEqual(other *BackendParams) bool { + return bep.deepEqual(other) && + bep.GetUnhealthyUpdatedAt().Equal(other.GetUnhealthyUpdatedAt()) +} + // BackendZone locates the backend to a specific zone and specifies what zones // the backend should be used in for topology aware routing. +// +deepequal-gen=true type BackendZone struct { // Zone where backend is located. 
Zone string diff --git a/pkg/loadbalancer/benchmark/benchmark.go b/pkg/loadbalancer/benchmark/benchmark.go index a528366acafa2..d28b8aceffb1b 100644 --- a/pkg/loadbalancer/benchmark/benchmark.go +++ b/pkg/loadbalancer/benchmark/benchmark.go @@ -25,6 +25,7 @@ import ( k8sRuntime "k8s.io/apimachinery/pkg/runtime" daemonk8s "github.com/cilium/cilium/daemon/k8s" + cmtypes "github.com/cilium/cilium/pkg/clustermesh/types" "github.com/cilium/cilium/pkg/datapath/tables" "github.com/cilium/cilium/pkg/hive" "github.com/cilium/cilium/pkg/k8s" @@ -539,6 +540,9 @@ func testHive(maps lbmaps.LBMaps, node.LocalNodeStoreTestCell, cell.Provide( + func() cmtypes.ClusterInfo { + return cmtypes.ClusterInfo{} + }, func() loadbalancer.Config { return loadbalancer.Config{ UserConfig: loadbalancer.DefaultUserConfig, diff --git a/pkg/loadbalancer/cell/cell_test.go b/pkg/loadbalancer/cell/cell_test.go index 2c319d31c4895..12fb1b3a61947 100644 --- a/pkg/loadbalancer/cell/cell_test.go +++ b/pkg/loadbalancer/cell/cell_test.go @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/require" daemonk8s "github.com/cilium/cilium/daemon/k8s" + cmtypes "github.com/cilium/cilium/pkg/clustermesh/types" "github.com/cilium/cilium/pkg/datapath/tables" envoyCfg "github.com/cilium/cilium/pkg/envoy/config" "github.com/cilium/cilium/pkg/hive" @@ -38,8 +39,9 @@ func TestCell(t *testing.T) { metrics.Cell, kpr.Cell, Cell, - cell.Provide(source.NewSources), cell.Provide( + func() cmtypes.ClusterInfo { return cmtypes.ClusterInfo{} }, + source.NewSources, tables.NewNodeAddressTable, statedb.RWTable[tables.NodeAddress].ToTable, func() *option.DaemonConfig { diff --git a/pkg/loadbalancer/errors.go b/pkg/loadbalancer/errors.go index 8af4b5774fc59..6f1b3e1da03d6 100644 --- a/pkg/loadbalancer/errors.go +++ b/pkg/loadbalancer/errors.go @@ -13,4 +13,7 @@ var ( // ErrFrontendConflict occurs when a frontend is being upserted but it already // exists and is owned by a different service. ErrFrontendConflict = errors.New("frontend already owned by another service") + + // ErrInvalidL4Addr occurs when L4AddrFromString attempts to parse a malformed L4Addr string + ErrInvalidL4Addr = errors.New("invalid l4 addr format. expected /") ) diff --git a/pkg/loadbalancer/healthserver/script_test.go b/pkg/loadbalancer/healthserver/script_test.go index 856d1c66faaa7..7e375fce5e2be 100644 --- a/pkg/loadbalancer/healthserver/script_test.go +++ b/pkg/loadbalancer/healthserver/script_test.go @@ -86,6 +86,7 @@ func TestScript(t *testing.T) { maglev.Cell, node.LocalNodeStoreTestCell, cell.Provide( + func() cmtypes.ClusterInfo { return cmtypes.ClusterInfo{} }, func(cfg loadbalancer.TestConfig) *loadbalancer.TestConfig { return &cfg }, tables.NewNodeAddressTable, statedb.RWTable[tables.NodeAddress].ToTable, diff --git a/pkg/loadbalancer/loadbalancer.go b/pkg/loadbalancer/loadbalancer.go index e8cb7f7ce285c..c978ef55e4e7b 100644 --- a/pkg/loadbalancer/loadbalancer.go +++ b/pkg/loadbalancer/loadbalancer.go @@ -777,6 +777,27 @@ func (l L4Addr) String() string { return fmt.Sprintf("%d/%s", l.Port, l.Protocol) } +// L4AddrFromString returns a L4Addr from its string representation. 
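
The parser defined just below is the inverse of L4Addr.String(); a minimal round-trip from a hypothetical caller (the input string is illustrative):

```go
package example // hypothetical caller of the new parsing helper

import (
	"fmt"

	"github.com/cilium/cilium/pkg/loadbalancer"
)

func parseBackendPort() error {
	addr, err := loadbalancer.L4AddrFromString("443/tcp")
	if err != nil {
		return err // e.g. wraps loadbalancer.ErrInvalidL4Addr for malformed input
	}
	fmt.Println(addr.String()) // prints "443/TCP"
	return nil
}
```
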
+func L4AddrFromString(s string) (L4Addr, error) { + splitted := strings.Split(s, "/") + + if len(splitted) != 2 { + return L4Addr{}, fmt.Errorf("%w for %s", ErrInvalidL4Addr, s) + } + + proto, err := NewL4Type(strings.ToUpper(splitted[1])) + if err != nil { + return L4Addr{}, fmt.Errorf("%w for %s", err, splitted[0]) + } + + portUInt64, err := strconv.ParseUint(splitted[0], 10, 16) + if err != nil { + return L4Addr{}, fmt.Errorf("%s is not a valid port number. %w", splitted[1], err) + } + + return NewL4Addr(proto, uint16(portUInt64)), nil +} + // L3n4Addr is an unique L3+L4 address and scope (for traffic policies). type L3n4Addr unique.Handle[l3n4AddrRep] @@ -811,6 +832,16 @@ func (l L3n4Addr) AddrCluster() cmtypes.AddrCluster { return l.rep().addrCluster } +func (l *L3n4Addr) DeepEqual(other *L3n4Addr) bool { + if l == nil && other == nil { + return true + } + if other == nil || l == nil { + return false + } + return *l == *other +} + // NewL3n4Addr creates a new L3n4Addr. func NewL3n4Addr(protocol L4Type, addrCluster cmtypes.AddrCluster, portNumber uint16, scope uint8) L3n4Addr { lbport := NewL4Addr(protocol, portNumber) diff --git a/pkg/loadbalancer/loadbalancer_test.go b/pkg/loadbalancer/loadbalancer_test.go index c8b58ddce2777..26cc780bbbe8d 100644 --- a/pkg/loadbalancer/loadbalancer_test.go +++ b/pkg/loadbalancer/loadbalancer_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "go.yaml.in/yaml/v3" cmtypes "github.com/cilium/cilium/pkg/clustermesh/types" @@ -78,6 +79,28 @@ func TestL4Addr_Equals(t *testing.T) { } } +func TestL3n4Addr_DeepEqual(t *testing.T) { + var v4, v6 L3n4Addr + require.NoError(t, v4.ParseFromString("1.1.1.1:80/TCP")) + require.NoError(t, v6.ParseFromString("[2001::1]:80/TCP")) + + assert.True(t, v4.DeepEqual(&v4)) + assert.True(t, v6.DeepEqual(&v6)) + assert.False(t, v4.DeepEqual(&v6)) + assert.False(t, v6.DeepEqual(&v4)) + + var nilp *L3n4Addr + assert.True(t, nilp.DeepEqual(nil)) + assert.False(t, nilp.DeepEqual(&v4)) + + var v4_2, v6_2 L3n4Addr + require.NoError(t, v4_2.ParseFromString("1.1.1.1:80/TCP")) + require.NoError(t, v6_2.ParseFromString("[2001::1]:80/TCP")) + + assert.True(t, v4.DeepEqual(&v4_2)) + assert.True(t, v6.DeepEqual(&v6_2)) +} + func TestL3n4Addr_Bytes(t *testing.T) { v4 := cmtypes.MustParseAddrCluster("1.1.1.1") v4c3 := cmtypes.MustParseAddrCluster("1.1.1.1@3") @@ -601,6 +624,34 @@ func TestServiceNameYAMLJSON(t *testing.T) { } } +func TestL4AddrParsing(t *testing.T) { + type testCase struct { + err bool + input string + output L4Addr + } + + testCases := []testCase{ + {false, "443/tcp", L4Addr{Protocol: TCP, Port: 443}}, + {false, "1312/udp", L4Addr{Protocol: UDP, Port: 1312}}, + {true, "65538/tcp", L4Addr{}}, // port > 16 bits + {true, "123/abcd", L4Addr{}}, // unknown proto + } + + for _, tc := range testCases { + addr, err := L4AddrFromString(tc.input) + if tc.err { + assert.Error(t, err) + } else { + assert.NoError(t, err) + // test the conversion back + require.Equal(t, addr.String(), strings.ToUpper(tc.input)) + } + + require.Equal(t, tc.output, addr) + } +} + func BenchmarkNewServiceName(b *testing.B) { b.ReportAllocs() for b.Loop() { diff --git a/pkg/loadbalancer/redirectpolicy/controller.go b/pkg/loadbalancer/redirectpolicy/controller.go index 9a8c7a97ed663..91a3e799fe9f5 100644 --- a/pkg/loadbalancer/redirectpolicy/controller.go +++ b/pkg/loadbalancer/redirectpolicy/controller.go @@ -152,6 +152,7 @@ func (c *lrpController) run(ctx context.Context, health 
cell.Health) error { cleanup(wtxn) } delete(cleanupFuncs, lrpID) + delete(watchSets, lrpID) if c.p.LRPMetrics != nil { c.p.LRPMetrics.DelLRPConfig(lrpID) } @@ -169,8 +170,10 @@ func (c *lrpController) run(ctx context.Context, health cell.Health) error { if chanIsClosed(fesInitWatch) { // Mark desired SkipLBs as initialized to allow pruning c.desiredSkipLBInit(wtxn) + + // All initializers marked done, we can stop tracking these. + initWatches = nil } - initWatches = nil } } @@ -272,7 +275,7 @@ func (c *lrpController) processRedirectPolicy(wtxn writer.WriteTxn, lrpID lb.Ser matchingPods = append(matchingPods, getPodInfo(pod)) } } - c.updateRedirectBackends(wtxn, ws, lrp, matchingPods) + c.updateRedirectBackends(wtxn, lrp, matchingPods) c.updateSkipLB(wtxn, ws, lrp, matchingPods) c.updateRedirects(wtxn, ws, cleanup, lrp, matchingPods) @@ -329,6 +332,13 @@ func (c *lrpController) updateRedirects(wtxn writer.WriteTxn, ws *statedb.WatchS Type: lb.SVCTypeLocalRedirect, ServiceName: lrpServiceName, ServicePort: feM.feAddr.Port(), + //if we only have one frontend mapping, we dont need the frontend port name so it will not check the port name in the backend ports + PortName: func() lb.FEPortName { + if len(lrp.FrontendMappings) > 1 { + return feM.fePort + } + return lb.FEPortName("") + }(), }, ) if err != nil { @@ -347,7 +357,7 @@ func (c *lrpController) updateRedirects(wtxn writer.WriteTxn, ws *statedb.WatchS return cleanup } -func (c *lrpController) updateRedirectBackends(wtxn writer.WriteTxn, ws *statedb.WatchSet, lrp *LocalRedirectPolicy, pods []podInfo) { +func (c *lrpController) updateRedirectBackends(wtxn writer.WriteTxn, lrp *LocalRedirectPolicy, pods []podInfo) { portNameMatches := func(portName string) bool { for bePortName := range lrp.BackendPortsByPortName { if string(bePortName) == strings.ToLower(portName) { @@ -395,8 +405,12 @@ func (c *lrpController) updateRedirectBackends(wtxn writer.WriteTxn, ws *statedb newCount := len(beps) orphanCount := 0 for be := range c.p.Writer.Backends().List(wtxn, lb.BackendByServiceName(lrpServiceName)) { + inst := be.GetInstance(lrpServiceName) + if inst == nil { + continue + } if slices.ContainsFunc(beps, func(bep lb.BackendParams) bool { - return bep.Address == be.Address + return inst.DeepEqual(&bep) }) { newCount-- } else { @@ -567,6 +581,7 @@ func (c *lrpController) frontendsToSkip(txn statedb.ReadTxn, ws *statedb.WatchSe feAddrs := []lb.L3n4Addr{} fes, watch := c.p.Writer.Frontends().ListWatch(txn, lb.FrontendByServiceName(targetName)) ws.Add(watch) + for fe := range fes { if lrp.LRPType == lrpConfigTypeAddr || fe.RedirectTo != nil { feAddrs = append(feAddrs, fe.Address) diff --git a/pkg/loadbalancer/redirectpolicy/script_test.go b/pkg/loadbalancer/redirectpolicy/script_test.go index beb41f7a04680..971b26f2b02a2 100644 --- a/pkg/loadbalancer/redirectpolicy/script_test.go +++ b/pkg/loadbalancer/redirectpolicy/script_test.go @@ -22,6 +22,7 @@ import ( "github.com/stretchr/testify/require" daemonk8s "github.com/cilium/cilium/daemon/k8s" + cmtypes "github.com/cilium/cilium/pkg/clustermesh/types" "github.com/cilium/cilium/pkg/datapath/tables" envoyCfg "github.com/cilium/cilium/pkg/envoy/config" "github.com/cilium/cilium/pkg/hive" @@ -78,6 +79,7 @@ func TestScript(t *testing.T) { node.LocalNodeStoreTestCell, maglev.Cell, cell.Provide( + func() cmtypes.ClusterInfo { return cmtypes.ClusterInfo{} }, source.NewSources, func() *loadbalancer.TestConfig { return &loadbalancer.TestConfig{} }, tables.NewNodeAddressTable, diff --git 
a/pkg/loadbalancer/redirectpolicy/skiplb.go b/pkg/loadbalancer/redirectpolicy/skiplb.go index c6e977a3615b2..ca065c66b84be 100644 --- a/pkg/loadbalancer/redirectpolicy/skiplb.go +++ b/pkg/loadbalancer/redirectpolicy/skiplb.go @@ -385,9 +385,6 @@ func newSkipLBMap(p skiplbmapParams) (out bpf.MapOut[lbmaps.SkipLBMap], err erro p.Lifecycle.Append(cell.Hook{ OnStart: func(cell.HookContext) error { - if !p.NetNSCookieSupport() { - return nil - } return m.OpenOrCreate() }, OnStop: func(cell.HookContext) error { diff --git a/pkg/loadbalancer/redirectpolicy/testdata/address-malformed.txtar b/pkg/loadbalancer/redirectpolicy/testdata/address-malformed.txtar new file mode 100644 index 0000000000000..09b08f2ad58e2 --- /dev/null +++ b/pkg/loadbalancer/redirectpolicy/testdata/address-malformed.txtar @@ -0,0 +1,108 @@ +# Verifies that malformed AddressMatcher LRPs are not applied + +hive start + +# Add LRP and then LRP-selected pod. +k8s/add lrp-addr-malformed.yaml +k8s/add pod.yaml + +# Tables and maps should be empty. +* db/empty services frontends backends localredirectpolicies + +# Delete the LRP. +k8s/delete lrp-addr-malformed.yaml + +# Add it again LRP. +k8s/add lrp-addr-malformed.yaml + +# Tables and maps should still be empty. +* db/empty services frontends backends localredirectpolicies + +-- lrp-addr-malformed.yaml -- +apiVersion: "cilium.io/v2" +kind: CiliumLocalRedirectPolicy +metadata: + name: "lrp-addr" +spec: + redirectFrontend: + addressMatcher: + ip: "169.254.169.254" + toPorts: + - port: "5050" + name: "test" + protocol: TCP + - port: "5051" + name: "test1" + protocol: TCP + redirectBackend: + localEndpointSelector: + matchLabels: + app: proxy + toPorts: + - port: "80" + name: "test" + protocol: TCP + - port: "81" + name: "test1" + protocol: TCP + +-- lrp-addr.yaml -- +apiVersion: "cilium.io/v2" +kind: CiliumLocalRedirectPolicy +metadata: + name: "lrp-addr" + namespace: test +spec: + redirectFrontend: + addressMatcher: + ip: "169.254.169.254" + toPorts: + - port: "5050" + name: "test" + protocol: TCP + - port: "5051" + name: "test1" + protocol: TCP + redirectBackend: + localEndpointSelector: + matchLabels: + app: proxy + toPorts: + - port: "80" + name: "test" + protocol: TCP + - port: "81" + name: "test1" + protocol: TCP + +-- pod.yaml -- +apiVersion: v1 +kind: Pod +metadata: + name: lrp-pod + namespace: test + labels: + app: proxy +spec: + containers: + - name: lrp-pod + image: nginx + ports: + - containerPort: 80 + name: test + protocol: TCP + - containerPort: 81 + name: test1 + protocol: TCP + nodeName: testnode +status: + hostIP: 172.19.0.3 + hostIPs: + - ip: 172.19.0.3 + phase: Running + podIP: 10.244.2.1 + podIPs: + - ip: 10.244.2.1 + conditions: + - lastProbeTime: null + type: Ready diff --git a/pkg/loadbalancer/redirectpolicy/testdata/address-matcher-named-ports.txtar b/pkg/loadbalancer/redirectpolicy/testdata/address-matcher-named-ports.txtar new file mode 100644 index 0000000000000..d0f9cf03804d4 --- /dev/null +++ b/pkg/loadbalancer/redirectpolicy/testdata/address-matcher-named-ports.txtar @@ -0,0 +1,245 @@ +# AddressMatcherLRPs with named and unnamed ports, LRP resources applied in different orders + +hive start + +# Add LRP and then LRP-selected pod. +k8s/add lrp-addr.yaml +k8s/add pod.yaml + +# Add a correct LRP. + +# Compare tables +db/cmp localredirectpolicies lrp.table +db/cmp services services.table +db/cmp frontends frontends.table +db/cmp backends backends.table + +# Remove the LRP. +k8s/delete lrp-addr.yaml + +# Tables and maps should now be empty. 
+* db/empty services frontends backends localredirectpolicies + +# Add the LRP -- LRP-selected pod already exists. +k8s/add lrp-addr.yaml + +# Compare tables +db/cmp localredirectpolicies lrp.table +db/cmp services services.table +db/cmp frontends frontends.table +db/cmp backends backends.table + +# Remove the LRP. +k8s/delete lrp-addr.yaml + +# Tables and maps should now be empty. +* db/empty services frontends backends localredirectpolicies + +# Add LRP with single named port. +k8s/add lrp-addr-single.yaml +k8s/add pod-single.yaml + +# Compare tables +db/cmp localredirectpolicies lrp-single.table +db/cmp services services-single.table +db/cmp frontends frontends-single.table +db/cmp backends backends-single.table + +# Remove the LRP. +k8s/delete lrp-addr-single.yaml + +# Tables and maps should now be empty. +* db/empty services frontends backends localredirectpolicies + +# Add LRP with single unnamed port to cover https://github.com/cilium/cilium/issues/41407 +k8s/add lrp-addr-unnamed.yaml + +# Compare tables +db/cmp localredirectpolicies lrp-unnamed.table +db/cmp services services-unnamed.table +db/cmp frontends frontends-unnamed.table +db/cmp backends backends-unnamed.table + +-- lrp.table -- +Name Type FrontendType Frontends +test/lrp-addr address addr-named-ports 169.254.169.254:5050/TCP, 169.254.169.254:5051/TCP + +-- services.table -- +Name Source +test/lrp-addr:local-redirect k8s + +-- frontends.table -- +Address Type ServiceName Backends RedirectTo Status +169.254.169.254:5050/TCP LocalRedirect test/lrp-addr:local-redirect 10.244.2.1:50/TCP Done +169.254.169.254:5051/TCP LocalRedirect test/lrp-addr:local-redirect 10.244.2.1:51/TCP Done + +-- backends.table -- +Address Instances +10.244.2.1:50/TCP test/lrp-addr:local-redirect (test) +10.244.2.1:51/TCP test/lrp-addr:local-redirect (test1) + +-- lrp-single.table -- +Name Type FrontendType Frontends +test/lrp-addr-single address addr-single-port 169.254.169.254:5050/TCP + +-- lrp-unnamed.table -- +Name Type FrontendType Frontends +test/lrp-addr-unnamed address addr-single-port 169.254.169.254:5050/TCP + +-- services-single.table -- +Name Source +test/lrp-addr-single:local-redirect k8s + +-- services-unnamed.table -- +Name Source +test/lrp-addr-unnamed:local-redirect k8s + +-- frontends-single.table -- +Address Type ServiceName Backends RedirectTo Status +169.254.169.254:5050/TCP LocalRedirect test/lrp-addr-single:local-redirect 10.244.2.1:50/TCP Done + +-- frontends-unnamed.table -- +Address Type ServiceName Backends RedirectTo Status +169.254.169.254:5050/TCP LocalRedirect test/lrp-addr-unnamed:local-redirect 10.244.2.1:50/TCP Done + +-- backends-single.table -- +Address Instances +10.244.2.1:50/TCP test/lrp-addr-single:local-redirect + +-- backends-unnamed.table -- +Address Instances +10.244.2.1:50/TCP test/lrp-addr-unnamed:local-redirect + +-- lrp-addr.yaml -- +apiVersion: "cilium.io/v2" +kind: CiliumLocalRedirectPolicy +metadata: + name: "lrp-addr" + namespace: test +spec: + redirectFrontend: + addressMatcher: + ip: "169.254.169.254" + toPorts: + - port: "5050" + name: "test" + protocol: TCP + - port: "5051" + name: "test1" + protocol: TCP + redirectBackend: + localEndpointSelector: + matchLabels: + app: proxy + toPorts: + - port: "50" + name: "test" + protocol: TCP + - port: "51" + name: "test1" + protocol: TCP + +-- lrp-addr-single.yaml -- +apiVersion: "cilium.io/v2" +kind: CiliumLocalRedirectPolicy +metadata: + name: "lrp-addr-single" + namespace: test +spec: + redirectFrontend: + addressMatcher: + ip: "169.254.169.254" + 
toPorts: + - port: "5050" + name: "test" + protocol: TCP + redirectBackend: + localEndpointSelector: + matchLabels: + app: proxy + toPorts: + - port: "50" + name: "test" + protocol: TCP + +-- lrp-addr-unnamed.yaml -- +apiVersion: "cilium.io/v2" +kind: CiliumLocalRedirectPolicy +metadata: + name: "lrp-addr-unnamed" + namespace: test +spec: + redirectFrontend: + addressMatcher: + ip: "169.254.169.254" + toPorts: + - port: "5050" + protocol: TCP + redirectBackend: + localEndpointSelector: + matchLabels: + app: proxy + toPorts: + - port: "50" + protocol: TCP +-- pod.yaml -- +apiVersion: v1 +kind: Pod +metadata: + name: lrp-pod + namespace: test + labels: + app: proxy +spec: + containers: + - name: lrp-pod + image: nginx + ports: + - containerPort: 50 + name: test + protocol: TCP + - containerPort: 51 + name: test1 + protocol: TCP + nodeName: testnode +status: + hostIP: 172.19.0.3 + hostIPs: + - ip: 172.19.0.3 + phase: Running + podIP: 10.244.2.1 + podIPs: + - ip: 10.244.2.1 + conditions: + - lastProbeTime: null + type: Ready + +-- pod-single.yaml -- +apiVersion: v1 +kind: Pod +metadata: + name: lrp-pod + namespace: test + labels: + app: proxy +spec: + containers: + - name: lrp-pod + image: nginx + ports: + - containerPort: 50 + protocol: TCP + - containerPort: 51 + protocol: TCP + nodeName: testnode +status: + hostIP: 172.19.0.3 + hostIPs: + - ip: 172.19.0.3 + phase: Running + podIP: 10.244.2.1 + podIPs: + - ip: 10.244.2.1 + conditions: + - lastProbeTime: null + type: Ready diff --git a/pkg/loadbalancer/redirectpolicy/testdata/address.txtar b/pkg/loadbalancer/redirectpolicy/testdata/address.txtar index 71ecccae8b1c1..dc29061805ad4 100644 --- a/pkg/loadbalancer/redirectpolicy/testdata/address.txtar +++ b/pkg/loadbalancer/redirectpolicy/testdata/address.txtar @@ -45,6 +45,18 @@ db/cmp localredirectpolicies lrp-fixed.table lb/maps-dump lbmaps.actual * cmp lbmaps.actual maps-v4.expected +# Policy can be deleted and added back and we'll get to same state +k8s/delete lrp-addr.yaml + +# Policy and service table is now empty +* db/empty localredirectpolicies services + +# Adding it back gets us to the same state as before +k8s/add lrp-addr.yaml +db/cmp localredirectpolicies lrp-fixed.table +db/cmp services services-ipv4.table +db/cmp frontends frontends-ipv4.table + # Remove the remaining policy k8s/delete lrp-addr.yaml @@ -68,11 +80,19 @@ Name Source test/lrp-addr-ipv6:local-redirect k8s test/lrp-addr:local-redirect k8s +-- services-ipv4.table -- +Name Source +test/lrp-addr:local-redirect k8s + -- frontends.table -- Address Type ServiceName Backends RedirectTo Status 169.254.169.254:8080/TCP LocalRedirect test/lrp-addr:local-redirect 10.244.2.1:80/TCP Done [2001::1]:8080/TCP LocalRedirect test/lrp-addr-ipv6:local-redirect [2002::2]:80/TCP Done +-- frontends-ipv4.table -- +Address Type ServiceName Backends RedirectTo Status +169.254.169.253:8080/TCP LocalRedirect test/lrp-addr:local-redirect 10.244.2.1:80/TCP Done + -- backends.table -- Address Instances 10.244.2.1:80/TCP test/lrp-addr-ipv6:local-redirect (tcp), test/lrp-addr:local-redirect (tcp) diff --git a/pkg/loadbalancer/redirectpolicy/testdata/service.txtar b/pkg/loadbalancer/redirectpolicy/testdata/service.txtar index c0fddcff1fcb6..f57bc10ac53eb 100644 --- a/pkg/loadbalancer/redirectpolicy/testdata/service.txtar +++ b/pkg/loadbalancer/redirectpolicy/testdata/service.txtar @@ -53,6 +53,19 @@ sed 'name: "foo"' 'name: "tcp"' lrp-svc.yaml k8s/update lrp-svc.yaml db/cmp frontends frontends.table +# Policy can be deleted and added back and we'll get 
to same state +k8s/delete lrp-svc.yaml + +# Policy table is now empty +* db/empty localredirectpolicies +db/cmp services services-before.table + +# Adding it back gets us to the same state as before +k8s/add lrp-svc.yaml +db/cmp localredirectpolicies lrp.table +db/cmp services services.table +db/cmp frontends frontends.table + # Removing policy reverts (but we'll get new backend id) k8s/delete lrp-svc.yaml db/cmp services services-before.table @@ -160,8 +173,8 @@ SVC: ID=7 ADDR=[1001::1]:7070/UDP SLOT=1 BEID=11 COUNT=0 QCOUNT=0 FLAGS=LocalRed SVC: ID=8 ADDR=[1001::1]:8080/TCP SLOT=0 LBALG=undef AFFTimeout=0 COUNT=1 QCOUNT=0 FLAGS=LocalRedirect SVC: ID=8 ADDR=[1001::1]:8080/TCP SLOT=1 BEID=12 COUNT=0 QCOUNT=0 FLAGS=LocalRedirect -- maps-after.expected -- -BE: ID=16 ADDR=10.244.1.1:7070/UDP STATE=active -BE: ID=17 ADDR=10.244.1.1:8080/TCP STATE=active +BE: ID=22 ADDR=10.244.1.1:7070/UDP STATE=active +BE: ID=23 ADDR=10.244.1.1:8080/TCP STATE=active REV: ID=5 ADDR=169.254.169.254:7070 REV: ID=6 ADDR=169.254.169.254:8080 REV: ID=7 ADDR=[1001::1]:7070 @@ -169,9 +182,9 @@ REV: ID=8 ADDR=[1001::1]:8080 SVC: ID=0 ADDR=169.254.169.254:0/ANY SLOT=0 LBALG=undef AFFTimeout=0 COUNT=0 QCOUNT=0 FLAGS=ClusterIP+non-routable SVC: ID=0 ADDR=[1001::1]:0/ANY SLOT=0 LBALG=undef AFFTimeout=0 COUNT=0 QCOUNT=0 FLAGS=ClusterIP+non-routable SVC: ID=5 ADDR=169.254.169.254:7070/UDP SLOT=0 LBALG=undef AFFTimeout=0 COUNT=1 QCOUNT=0 FLAGS=ClusterIP+non-routable -SVC: ID=5 ADDR=169.254.169.254:7070/UDP SLOT=1 BEID=16 COUNT=0 QCOUNT=0 FLAGS=ClusterIP+non-routable +SVC: ID=5 ADDR=169.254.169.254:7070/UDP SLOT=1 BEID=22 COUNT=0 QCOUNT=0 FLAGS=ClusterIP+non-routable SVC: ID=6 ADDR=169.254.169.254:8080/TCP SLOT=0 LBALG=undef AFFTimeout=0 COUNT=1 QCOUNT=0 FLAGS=ClusterIP+non-routable -SVC: ID=6 ADDR=169.254.169.254:8080/TCP SLOT=1 BEID=17 COUNT=0 QCOUNT=0 FLAGS=ClusterIP+non-routable +SVC: ID=6 ADDR=169.254.169.254:8080/TCP SLOT=1 BEID=23 COUNT=0 QCOUNT=0 FLAGS=ClusterIP+non-routable SVC: ID=7 ADDR=[1001::1]:7070/UDP SLOT=0 LBALG=undef AFFTimeout=0 COUNT=0 QCOUNT=0 FLAGS=ClusterIP+non-routable SVC: ID=8 ADDR=[1001::1]:8080/TCP SLOT=0 LBALG=undef AFFTimeout=0 COUNT=0 QCOUNT=0 FLAGS=ClusterIP+non-routable -- lrp-svc.yaml -- diff --git a/pkg/loadbalancer/reflectors/k8s.go b/pkg/loadbalancer/reflectors/k8s.go index 829cb3c74d7f9..fc96ea9a18d18 100644 --- a/pkg/loadbalancer/reflectors/k8s.go +++ b/pkg/loadbalancer/reflectors/k8s.go @@ -559,6 +559,14 @@ func upsertHostPort(netnsCookie lbmaps.HaveNetNSCookieSupport, config loadbalanc containers := slices.Concat(pod.Spec.InitContainers, pod.Spec.Containers) serviceNamePrefix := hostPortServiceNamePrefix(pod) + type podServices struct { + service loadbalancer.Service + bes []loadbalancer.BackendParams + fes sets.Set[loadbalancer.FrontendParams] + } + + servicesForThisPod := make(map[loadbalancer.ServiceName]*podServices) + updatedServices := sets.New[loadbalancer.ServiceName]() for _, c := range containers { for _, p := range c.Ports { @@ -589,10 +597,24 @@ func upsertHostPort(netnsCookie lbmaps.HaveNetNSCookieSupport, config loadbalanc pod.ObjectMeta.UID), ) + svc, ok := servicesForThisPod[serviceName] + if !ok { + servicesForThisPod[serviceName] = &podServices{ + service: loadbalancer.Service{ + ExtTrafficPolicy: loadbalancer.SVCTrafficPolicyCluster, + IntTrafficPolicy: loadbalancer.SVCTrafficPolicyCluster, + Name: serviceName, + LoopbackHostPort: false, + Source: source.Kubernetes, + }, + fes: sets.Set[loadbalancer.FrontendParams]{}, + } + svc = servicesForThisPod[serviceName] + } + 
var ipv4, ipv6 bool // Construct the backends from the pod IPs and container ports. - var bes []loadbalancer.BackendParams for _, podIP := range podIPs { addr, err := cmtypes.ParseAddrCluster(podIP) if err != nil { @@ -614,11 +636,9 @@ func upsertHostPort(netnsCookie lbmaps.HaveNetNSCookieSupport, config loadbalanc ), Weight: loadbalancer.DefaultBackendWeight, } - bes = append(bes, bep) + svc.bes = append(svc.bes, bep) } - loopbackHostport := false - feIP := net.ParseIP(p.HostIP) if feIP != nil && feIP.IsLoopback() && !netnsCookie() { log.Warn("The requested loopback address for hostIP is not supported for kernels which don't provide netns cookies. Ignoring.", @@ -642,7 +662,12 @@ func upsertHostPort(netnsCookie lbmaps.HaveNetNSCookieSupport, config loadbalanc } else { feIP = net.IPv6zero } - loopbackHostport = true + svc.service.LoopbackHostPort = true + } else if svc.service.LoopbackHostPort { + // if it's not a loopback but the service was previously marked with LoopbackHostPort, then it's + // an unsupported combination + log.Warn("service with LoopbackHostPort not supported for port with non-loopback address") + continue } feIPs = append(feIPs, feIP) } else if feIP == nil { @@ -654,8 +679,6 @@ func upsertHostPort(netnsCookie lbmaps.HaveNetNSCookieSupport, config loadbalanc } } - fes := make([]loadbalancer.FrontendParams, 0, len(feIPs)) - for _, feIP := range feIPs { addr := cmtypes.MustAddrClusterFromIP(feIP) fe := loadbalancer.FrontendParams{ @@ -669,29 +692,25 @@ func upsertHostPort(netnsCookie lbmaps.HaveNetNSCookieSupport, config loadbalanc ), ServicePort: uint16(p.HostPort), } - fes = append(fes, fe) - } - svc := &loadbalancer.Service{ - ExtTrafficPolicy: loadbalancer.SVCTrafficPolicyCluster, - IntTrafficPolicy: loadbalancer.SVCTrafficPolicyCluster, - Name: serviceName, - LoopbackHostPort: loopbackHostport, - Source: source.Kubernetes, - } - - err = writer.UpsertServiceAndFrontends(wtxn, svc, fes...) - if err != nil { - return fmt.Errorf("UpsertServiceAndFrontends: %w", err) - } - if err := writer.SetBackends(wtxn, serviceName, source.Kubernetes, bes...); err != nil { - return fmt.Errorf("SetBackends: %w", err) + svc.fes.Insert(fe) } updatedServices.Insert(serviceName) } } + for serviceName, svc := range servicesForThisPod { + err := writer.UpsertServiceAndFrontends(wtxn, &svc.service, svc.fes.UnsortedList()...) + if err != nil { + return fmt.Errorf("UpsertServiceAndFrontends: %w", err) + } + + if err := writer.SetBackends(wtxn, serviceName, source.Kubernetes, svc.bes...); err != nil { + return fmt.Errorf("SetBackends: %w", err) + } + } + // Find and remove orphaned HostPort services, frontends and backends // if 'HostPort' has changed or has been unset. 
for svc := range writer.Services().Prefix(wtxn, loadbalancer.ServiceByName(serviceNamePrefix)) { diff --git a/pkg/loadbalancer/repl/main.go b/pkg/loadbalancer/repl/main.go index 806eb43355922..98b444ce9c642 100644 --- a/pkg/loadbalancer/repl/main.go +++ b/pkg/loadbalancer/repl/main.go @@ -12,6 +12,7 @@ import ( "github.com/spf13/pflag" daemonk8s "github.com/cilium/cilium/daemon/k8s" + cmtypes "github.com/cilium/cilium/pkg/clustermesh/types" "github.com/cilium/cilium/pkg/datapath/tables" envoyCfg "github.com/cilium/cilium/pkg/envoy/config" "github.com/cilium/cilium/pkg/hive" @@ -89,8 +90,9 @@ var Hive = hive.New( metrics.Cell, cell.Config(loadbalancer.TestConfig{}), cell.Config(envoyCfg.SecretSyncConfig{}), - cell.Provide(source.NewSources), cell.Provide( + func() cmtypes.ClusterInfo { return cmtypes.ClusterInfo{} }, + source.NewSources, tables.NewNodeAddressTable, statedb.RWTable[tables.NodeAddress].ToTable, func() *option.DaemonConfig { diff --git a/pkg/loadbalancer/tests/script_test.go b/pkg/loadbalancer/tests/script_test.go index 893e52a6966cf..279e884d160eb 100644 --- a/pkg/loadbalancer/tests/script_test.go +++ b/pkg/loadbalancer/tests/script_test.go @@ -26,6 +26,7 @@ import ( "github.com/stretchr/testify/require" daemonk8s "github.com/cilium/cilium/daemon/k8s" + cmtypes "github.com/cilium/cilium/pkg/clustermesh/types" "github.com/cilium/cilium/pkg/datapath/tables" envoyCfg "github.com/cilium/cilium/pkg/envoy/config" "github.com/cilium/cilium/pkg/hive" @@ -89,6 +90,7 @@ func TestScript(t *testing.T) { maglev.Cell, node.LocalNodeStoreTestCell, cell.Provide( + func() cmtypes.ClusterInfo { return cmtypes.ClusterInfo{} }, func(cfg loadbalancer.TestConfig) *loadbalancer.TestConfig { return &cfg }, tables.NewNodeAddressTable, statedb.RWTable[tables.NodeAddress].ToTable, diff --git a/pkg/loadbalancer/tests/testdata/graceful-termination.txtar b/pkg/loadbalancer/tests/testdata/graceful-termination.txtar index 234005a0d5263..6aedb52ecadc2 100644 --- a/pkg/loadbalancer/tests/testdata/graceful-termination.txtar +++ b/pkg/loadbalancer/tests/testdata/graceful-termination.txtar @@ -121,7 +121,7 @@ Address Type ServiceName Status Backends -- frontends-terminating4.table -- Address Type ServiceName Status Backends -10.96.116.33:8081/TCP ClusterIP test/graceful-term-svc Done 10.244.0.112:8081/TCP, 10.244.0.113:8081/TCP +10.96.116.33:8081/TCP ClusterIP test/graceful-term-svc Done 10.244.0.113:8081/TCP -- backends.table -- Address Instances NodeName diff --git a/pkg/loadbalancer/tests/testdata/hostport.txtar b/pkg/loadbalancer/tests/testdata/hostport.txtar index 99429fb856817..13ac58304d011 100644 --- a/pkg/loadbalancer/tests/testdata/hostport.txtar +++ b/pkg/loadbalancer/tests/testdata/hostport.txtar @@ -93,22 +93,27 @@ default/other-app:host-port:4444:22222222-2e9b-4c61-8454-ae81344876d8 k8s -- frontends.table -- Address Type Status ServiceName Backends 0.0.0.0:4444/TCP HostPort Done default/my-app:host-port:4444:11111111-2e9b-4c61-8454-ae81344876d8 10.244.1.113:80/TCP +0.0.0.0:4444/UDP HostPort Done default/my-app:host-port:4444:11111111-2e9b-4c61-8454-ae81344876d8 10.244.1.113:80/UDP -- frontends2.table -- Address Type Status ServiceName Backends 0.0.0.0:4444/TCP HostPort Done default/other-app:host-port:4444:22222222-2e9b-4c61-8454-ae81344876d8 10.244.1.114:80/TCP +0.0.0.0:4444/UDP HostPort Done default/other-app:host-port:4444:22222222-2e9b-4c61-8454-ae81344876d8 10.244.1.114:80/UDP -- frontends3.table -- Address Type Status ServiceName Backends 0.0.0.0:5555/TCP HostPort Done 
default/other-app:host-port:5555:22222222-2e9b-4c61-8454-ae81344876d8 10.244.1.114:80/TCP +0.0.0.0:5555/UDP HostPort Done default/other-app:host-port:5555:22222222-2e9b-4c61-8454-ae81344876d8 10.244.1.114:80/UDP -- backends.table -- Address Instances 10.244.1.113:80/TCP default/my-app:host-port:4444:11111111-2e9b-4c61-8454-ae81344876d8 +10.244.1.113:80/UDP default/my-app:host-port:4444:11111111-2e9b-4c61-8454-ae81344876d8 -- backends3.table -- Address Instances 10.244.1.114:80/TCP default/other-app:host-port:5555:22222222-2e9b-4c61-8454-ae81344876d8 +10.244.1.114:80/UDP default/other-app:host-port:5555:22222222-2e9b-4c61-8454-ae81344876d8 -- pod.yaml -- apiVersion: v1 @@ -130,6 +135,9 @@ spec: - containerPort: 80 hostPort: 4444 protocol: TCP + - containerPort: 80 + hostPort: 4444 + protocol: UDP resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File @@ -173,6 +181,9 @@ spec: - containerPort: 80 hostPort: 4444 protocol: TCP + - containerPort: 80 + hostPort: 4444 + protocol: UDP resources: {} nodeName: testnode restartPolicy: Always @@ -195,25 +206,46 @@ status: -- lbmaps.expected -- BE: ID=1 ADDR=10.244.1.113:80/TCP STATE=active +BE: ID=2 ADDR=10.244.1.113:80/UDP STATE=active REV: ID=1 ADDR=0.0.0.0:4444 REV: ID=2 ADDR=1.1.1.1:4444 +REV: ID=3 ADDR=0.0.0.0:4444 +REV: ID=4 ADDR=1.1.1.1:4444 SVC: ID=1 ADDR=0.0.0.0:4444/TCP SLOT=0 LBALG=undef AFFTimeout=0 COUNT=1 QCOUNT=0 FLAGS=HostPort+non-routable SVC: ID=1 ADDR=0.0.0.0:4444/TCP SLOT=1 BEID=1 COUNT=0 QCOUNT=0 FLAGS=HostPort+non-routable SVC: ID=2 ADDR=1.1.1.1:4444/TCP SLOT=0 LBALG=undef AFFTimeout=0 COUNT=1 QCOUNT=0 FLAGS=HostPort SVC: ID=2 ADDR=1.1.1.1:4444/TCP SLOT=1 BEID=1 COUNT=0 QCOUNT=0 FLAGS=HostPort +SVC: ID=3 ADDR=0.0.0.0:4444/UDP SLOT=0 LBALG=undef AFFTimeout=0 COUNT=1 QCOUNT=0 FLAGS=HostPort+non-routable +SVC: ID=3 ADDR=0.0.0.0:4444/UDP SLOT=1 BEID=2 COUNT=0 QCOUNT=0 FLAGS=HostPort+non-routable +SVC: ID=4 ADDR=1.1.1.1:4444/UDP SLOT=0 LBALG=undef AFFTimeout=0 COUNT=1 QCOUNT=0 FLAGS=HostPort +SVC: ID=4 ADDR=1.1.1.1:4444/UDP SLOT=1 BEID=2 COUNT=0 QCOUNT=0 FLAGS=HostPort -- lbmaps2.expected -- -BE: ID=2 ADDR=10.244.1.114:80/TCP STATE=active -REV: ID=3 ADDR=0.0.0.0:4444 -REV: ID=4 ADDR=1.1.1.1:4444 -SVC: ID=3 ADDR=0.0.0.0:4444/TCP SLOT=0 LBALG=undef AFFTimeout=0 COUNT=1 QCOUNT=0 FLAGS=HostPort+non-routable -SVC: ID=3 ADDR=0.0.0.0:4444/TCP SLOT=1 BEID=2 COUNT=0 QCOUNT=0 FLAGS=HostPort+non-routable -SVC: ID=4 ADDR=1.1.1.1:4444/TCP SLOT=0 LBALG=undef AFFTimeout=0 COUNT=1 QCOUNT=0 FLAGS=HostPort -SVC: ID=4 ADDR=1.1.1.1:4444/TCP SLOT=1 BEID=2 COUNT=0 QCOUNT=0 FLAGS=HostPort +BE: ID=3 ADDR=10.244.1.114:80/TCP STATE=active +BE: ID=4 ADDR=10.244.1.114:80/UDP STATE=active +REV: ID=5 ADDR=0.0.0.0:4444 +REV: ID=6 ADDR=1.1.1.1:4444 +REV: ID=7 ADDR=0.0.0.0:4444 +REV: ID=8 ADDR=1.1.1.1:4444 +SVC: ID=5 ADDR=0.0.0.0:4444/TCP SLOT=0 LBALG=undef AFFTimeout=0 COUNT=1 QCOUNT=0 FLAGS=HostPort+non-routable +SVC: ID=5 ADDR=0.0.0.0:4444/TCP SLOT=1 BEID=3 COUNT=0 QCOUNT=0 FLAGS=HostPort+non-routable +SVC: ID=6 ADDR=1.1.1.1:4444/TCP SLOT=0 LBALG=undef AFFTimeout=0 COUNT=1 QCOUNT=0 FLAGS=HostPort +SVC: ID=6 ADDR=1.1.1.1:4444/TCP SLOT=1 BEID=3 COUNT=0 QCOUNT=0 FLAGS=HostPort +SVC: ID=7 ADDR=0.0.0.0:4444/UDP SLOT=0 LBALG=undef AFFTimeout=0 COUNT=1 QCOUNT=0 FLAGS=HostPort+non-routable +SVC: ID=7 ADDR=0.0.0.0:4444/UDP SLOT=1 BEID=4 COUNT=0 QCOUNT=0 FLAGS=HostPort+non-routable +SVC: ID=8 ADDR=1.1.1.1:4444/UDP SLOT=0 LBALG=undef AFFTimeout=0 COUNT=1 QCOUNT=0 FLAGS=HostPort +SVC: ID=8 ADDR=1.1.1.1:4444/UDP SLOT=1 BEID=4 COUNT=0 QCOUNT=0 
FLAGS=HostPort -- lbmaps3.expected -- -BE: ID=2 ADDR=10.244.1.114:80/TCP STATE=active -REV: ID=5 ADDR=0.0.0.0:5555 -REV: ID=6 ADDR=1.1.1.1:5555 -SVC: ID=5 ADDR=0.0.0.0:5555/TCP SLOT=0 LBALG=undef AFFTimeout=0 COUNT=1 QCOUNT=0 FLAGS=HostPort+non-routable -SVC: ID=5 ADDR=0.0.0.0:5555/TCP SLOT=1 BEID=2 COUNT=0 QCOUNT=0 FLAGS=HostPort+non-routable -SVC: ID=6 ADDR=1.1.1.1:5555/TCP SLOT=0 LBALG=undef AFFTimeout=0 COUNT=1 QCOUNT=0 FLAGS=HostPort -SVC: ID=6 ADDR=1.1.1.1:5555/TCP SLOT=1 BEID=2 COUNT=0 QCOUNT=0 FLAGS=HostPort +BE: ID=3 ADDR=10.244.1.114:80/TCP STATE=active +BE: ID=4 ADDR=10.244.1.114:80/UDP STATE=active +REV: ID=10 ADDR=1.1.1.1:5555 +REV: ID=11 ADDR=0.0.0.0:5555 +REV: ID=12 ADDR=1.1.1.1:5555 +REV: ID=9 ADDR=0.0.0.0:5555 +SVC: ID=10 ADDR=1.1.1.1:5555/TCP SLOT=0 LBALG=undef AFFTimeout=0 COUNT=1 QCOUNT=0 FLAGS=HostPort +SVC: ID=10 ADDR=1.1.1.1:5555/TCP SLOT=1 BEID=3 COUNT=0 QCOUNT=0 FLAGS=HostPort +SVC: ID=11 ADDR=0.0.0.0:5555/UDP SLOT=0 LBALG=undef AFFTimeout=0 COUNT=1 QCOUNT=0 FLAGS=HostPort+non-routable +SVC: ID=11 ADDR=0.0.0.0:5555/UDP SLOT=1 BEID=4 COUNT=0 QCOUNT=0 FLAGS=HostPort+non-routable +SVC: ID=12 ADDR=1.1.1.1:5555/UDP SLOT=0 LBALG=undef AFFTimeout=0 COUNT=1 QCOUNT=0 FLAGS=HostPort +SVC: ID=12 ADDR=1.1.1.1:5555/UDP SLOT=1 BEID=4 COUNT=0 QCOUNT=0 FLAGS=HostPort +SVC: ID=9 ADDR=0.0.0.0:5555/TCP SLOT=0 LBALG=undef AFFTimeout=0 COUNT=1 QCOUNT=0 FLAGS=HostPort+non-routable +SVC: ID=9 ADDR=0.0.0.0:5555/TCP SLOT=1 BEID=3 COUNT=0 QCOUNT=0 FLAGS=HostPort+non-routable diff --git a/pkg/loadbalancer/writer/writer_test.go b/pkg/loadbalancer/writer/writer_test.go index de409117f6043..373cfdf4d6eba 100644 --- a/pkg/loadbalancer/writer/writer_test.go +++ b/pkg/loadbalancer/writer/writer_test.go @@ -18,8 +18,9 @@ import ( "github.com/cilium/statedb/reconciler" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "k8s.io/utils/ptr" - "github.com/cilium/cilium/pkg/clustermesh/types" + cmtypes "github.com/cilium/cilium/pkg/clustermesh/types" "github.com/cilium/cilium/pkg/datapath/tables" "github.com/cilium/cilium/pkg/hive" "github.com/cilium/cilium/pkg/kpr" @@ -27,8 +28,6 @@ import ( "github.com/cilium/cilium/pkg/node" "github.com/cilium/cilium/pkg/option" "github.com/cilium/cilium/pkg/source" - - "k8s.io/utils/ptr" ) type testParams struct { @@ -50,6 +49,7 @@ func fixture(t testing.TB) (p testParams) { node.LocalNodeStoreTestCell, Cell, cell.Provide( + func() cmtypes.ClusterInfo { return cmtypes.ClusterInfo{} }, func() *option.DaemonConfig { return &option.DaemonConfig{} }, tables.NewNodeAddressTable, statedb.RWTable[tables.NodeAddress].ToTable, @@ -66,10 +66,10 @@ func fixture(t testing.TB) (p testParams) { return p } -func intToAddr(i int) types.AddrCluster { +func intToAddr(i int) cmtypes.AddrCluster { var addr [4]byte binary.BigEndian.PutUint32(addr[:], 0x0100_0000+uint32(i)) - addrCluster, _ := types.AddrClusterFromIP(addr[:]) + addrCluster, _ := cmtypes.AddrClusterFromIP(addr[:]) return addrCluster } diff --git a/pkg/loadbalancer/zz_generated.deepequal.go b/pkg/loadbalancer/zz_generated.deepequal.go index 405ce2a3781a9..ce8f432a6e5fd 100644 --- a/pkg/loadbalancer/zz_generated.deepequal.go +++ b/pkg/loadbalancer/zz_generated.deepequal.go @@ -8,6 +8,94 @@ package loadbalancer +// deepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
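The redirect-policy controller earlier in this diff now matches existing backend instances with `inst.DeepEqual(&bep)`, and the generated comparison for `BackendParams` that follows builds on the hand-written, nil-safe `DeepEqual` added to `L3n4Addr`. A small sketch of that comparison's behaviour (standalone program; only exported API from this diff is used):

```go
package main

import (
	"fmt"

	"github.com/cilium/cilium/pkg/loadbalancer"
)

func main() {
	var a, b loadbalancer.L3n4Addr
	if err := a.ParseFromString("1.1.1.1:80/TCP"); err != nil {
		panic(err)
	}
	if err := b.ParseFromString("1.1.1.1:80/TCP"); err != nil {
		panic(err)
	}

	// Equal values compare as deep-equal; a nil receiver only equals nil.
	fmt.Println(a.DeepEqual(&b)) // true

	var nilAddr *loadbalancer.L3n4Addr
	fmt.Println(nilAddr.DeepEqual(nil)) // true
	fmt.Println(nilAddr.DeepEqual(&a))  // false
}
```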
+func (in *BackendParams) deepEqual(other *BackendParams) bool { + if other == nil { + return false + } + + if !in.Address.DeepEqual(&other.Address) { + return false + } + + if ((in.PortNames != nil) && (other.PortNames != nil)) || ((in.PortNames == nil) != (other.PortNames == nil)) { + in, other := &in.PortNames, &other.PortNames + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if inElement != (*other)[i] { + return false + } + } + } + } + + if in.Weight != other.Weight { + return false + } + if in.NodeName != other.NodeName { + return false + } + if (in.Zone == nil) != (other.Zone == nil) { + return false + } else if in.Zone != nil { + if !in.Zone.DeepEqual(other.Zone) { + return false + } + } + + if in.ClusterID != other.ClusterID { + return false + } + if in.Source != other.Source { + return false + } + if in.State != other.State { + return false + } + if in.Unhealthy != other.Unhealthy { + return false + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *BackendZone) DeepEqual(other *BackendZone) bool { + if other == nil { + return false + } + + if in.Zone != other.Zone { + return false + } + if ((in.ForZones != nil) && (other.ForZones != nil)) || ((in.ForZones == nil) != (other.ForZones == nil)) { + in, other := &in.ForZones, &other.ForZones + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if inElement != (*other)[i] { + return false + } + } + } + } + + return true +} + // DeepEqual is an autogenerated deepequal function, deeply comparing the // receiver with other. in must be non-nil. func (in *Config) DeepEqual(other *Config) bool { diff --git a/pkg/logging/logfields/logfields.go b/pkg/logging/logfields/logfields.go index 6ee34472cd50f..33e36f3d0e7da 100644 --- a/pkg/logging/logfields/logfields.go +++ b/pkg/logging/logfields/logfields.go @@ -419,6 +419,9 @@ const ( // CiliumEgressGatewayPolicyName is the name of a CiliumEgressGatewayPolicy CiliumEgressGatewayPolicyName = "ciliumEgressGatewayPolicyName" + // CiliumVtepPolicyName is the name of a CiliumVtepPolicy + CiliumVtepPolicyName = "ciliumVtepPolicyName" + // CiliumClusterwideEnvoyConfigName is the name of a CiliumClusterwideEnvoyConfig CiliumClusterwideEnvoyConfigName = "ciliumClusterwideEnvoyConfigName" @@ -821,6 +824,12 @@ const ( // GatewayIP is the gateway IP used in a given egress policy GatewayIP = "gatewayIP" + // VtepIP is the ip address of remote Vxlan tunnel Endpoint + VtepIP = "vtepIP" + + // VtepMAC is the mac address of remote vxlan tunnel Endpoint + VtepMAC = "vtepMAC" + // Number of Backends failed while restoration. RestoredBackends = "restoredBackends" diff --git a/pkg/maps/cells.go b/pkg/maps/cells.go index faef0c367d207..acb36c298888b 100644 --- a/pkg/maps/cells.go +++ b/pkg/maps/cells.go @@ -24,6 +24,7 @@ import ( "github.com/cilium/cilium/pkg/maps/policymap" "github.com/cilium/cilium/pkg/maps/signalmap" "github.com/cilium/cilium/pkg/maps/srv6map" + "github.com/cilium/cilium/pkg/maps/vtep_policy" ) // Cell contains all cells which are providing BPF Maps. @@ -77,6 +78,9 @@ var Cell = cell.Module( // Provides access to the encryption map. 
encrypt.Cell, + + // Provides access to vtep policy maps + vtep_policy.Cell, ) type mapApiHandlerOut struct { diff --git a/pkg/maps/vtep_policy/doc.go b/pkg/maps/vtep_policy/doc.go new file mode 100644 index 0000000000000..45aab44426879 --- /dev/null +++ b/pkg/maps/vtep_policy/doc.go @@ -0,0 +1,5 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// +groupName=maps +package vtep_policy diff --git a/pkg/maps/vtep_policy/policy.go b/pkg/maps/vtep_policy/policy.go new file mode 100644 index 0000000000000..78eee7dc046b9 --- /dev/null +++ b/pkg/maps/vtep_policy/policy.go @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package vtep_policy + +import "github.com/cilium/hive/cell" + +var Cell = cell.Module( + "vteppolicy", + "VTEP policy provide access to the egress gateway datapath maps", + cell.Provide(createPolicyMapFromDaemonConfig), +) diff --git a/pkg/maps/vtep_policy/vtep.go b/pkg/maps/vtep_policy/vtep.go new file mode 100644 index 0000000000000..37ba5c5b7e72c --- /dev/null +++ b/pkg/maps/vtep_policy/vtep.go @@ -0,0 +1,192 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package vtep_policy + +import ( + "fmt" + "net/netip" + + "log/slog" + + "github.com/cilium/cilium/pkg/bpf" + "github.com/cilium/cilium/pkg/defaults" + "github.com/cilium/cilium/pkg/ebpf" + "github.com/cilium/cilium/pkg/mac" + "github.com/cilium/cilium/pkg/metrics" + "github.com/cilium/cilium/pkg/option" + "github.com/cilium/cilium/pkg/types" + "github.com/cilium/hive/cell" +) + +const ( + MaxEntries = 16384 + // Name is the canonical name for the VTEP map on the filesystem. + VtepPolicyMapName = "cilium_vtep_policy_map" +) + +// Must be in sync with struct vtep_key in +type VtepPolicyKey struct { + PrefixLen uint32 `align:"prefixlen"` + SourceIP types.IPv4 `align:"src_ip"` + DestCIDR types.IPv4 `align:"dst_ip"` +} + +func (k VtepPolicyKey) String() string { + return fmt.Sprintf("%s.%s/%d", k.SourceIP, k.DestCIDR, k.PrefixLen) +} + +func (k *VtepPolicyKey) New() bpf.MapKey { return &VtepPolicyKey{} } + +// NewKey returns an Key based on the provided source IP address and destination CIDR +func NewKey(srcIP netip.Addr, dstCIDR netip.Prefix) VtepPolicyKey { + result := VtepPolicyKey{} + + ip4 := srcIP.As4() + copy(result.SourceIP[:], ip4[:]) + + cidr := dstCIDR.Addr().As4() + copy(result.DestCIDR[:], cidr[:]) + + result.PrefixLen = 32 + uint32(dstCIDR.Bits()) + + return result +} + +// VtepPolicyVal implements the bpf.MapValue interface. It contains the +// VTEP endpoint MAC and IP +type VtepPolicyVal struct { + Mac mac.Uint64MAC `align:"vtep_mac"` + VtepIp types.IPv4 `align:"tunnel_endpoint"` + _ [4]byte +} + +func (v *VtepPolicyVal) String() string { + return fmt.Sprintf("vtepmac=%s tunnelendpoint=%s", + v.Mac, v.VtepIp) +} + +func (v *VtepPolicyVal) New() bpf.MapValue { return &VtepPolicyVal{} } + +// Map represents an VTEP BPF map. +type VtepPolicyMap struct { + m *bpf.Map +} + +func createPolicyMapFromDaemonConfig(lifecycle cell.Lifecycle, cfg *option.DaemonConfig, metricsRegistry *metrics.Registry) bpf.MapOut[*VtepPolicyMap] { + if !cfg.EnableVTEP || !cfg.EnableIPv4 { + return bpf.NewMapOut[*VtepPolicyMap](nil) + } + + return bpf.NewMapOut(newVtepPolicyMap(lifecycle, metricsRegistry, ebpf.PinByName)) +} + +// CreatePrivatePolicyMap4 creates an unpinned IPv4 policy map. +// +// Useful for testing. 
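The `VtepPolicyKey` added here is an LPM-trie key whose prefix length covers the fixed 32-bit source IP plus the destination CIDR bits (hence `32 + uint32(dstCIDR.Bits())` in `NewKey`), which suggests the source address is matched exactly while the destination is matched by longest prefix. A small sketch of the key construction (standalone program; the addresses are arbitrary example values):

```go
package main

import (
	"fmt"
	"net/netip"

	"github.com/cilium/cilium/pkg/maps/vtep_policy"
)

func main() {
	src := netip.MustParseAddr("10.0.0.5")
	dst := netip.MustParsePrefix("192.168.0.0/16")

	key := vtep_policy.NewKey(src, dst)

	// The LPM prefix spans the full 32-bit source IP plus the destination
	// CIDR length: 32 + 16 = 48 bits.
	fmt.Println(key.PrefixLen)       // 48
	fmt.Println(key.Match(src, dst)) // true
}
```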
+func CreatePrivatePolicyMap(lc cell.Lifecycle, registry *metrics.Registry) *VtepPolicyMap { + return newVtepPolicyMap(lc, registry, ebpf.PinNone) +} + +func newVtepPolicyMap(lc cell.Lifecycle, registry *metrics.Registry, pinning ebpf.PinType) *VtepPolicyMap { + m := bpf.NewMap( + VtepPolicyMapName, + ebpf.LPMTrie, + &VtepPolicyKey{}, + &VtepPolicyVal{}, + defaults.MaxVtepPolicyEntries, + 0, + ).WithCache().WithPressureMetric(registry). + WithEvents(option.Config.GetEventBufferConfig(VtepPolicyMapName)) + + lc.Append(cell.Hook{ + OnStart: func(cell.HookContext) error { + switch pinning { + case ebpf.PinNone: + return m.CreateUnpinned() + case ebpf.PinByName: + return m.OpenOrCreate() + } + return fmt.Errorf("received unexpected pin type: %d", pinning) + }, + OnStop: func(cell.HookContext) error { + return m.Close() + }, + }) + + return &VtepPolicyMap{m} +} + +func NewVal(newTunnelEndpoint netip.Addr, vtepMAC mac.MAC) VtepPolicyVal { + mac, _ := vtepMAC.Uint64() + + value := VtepPolicyVal{ + Mac: mac, + } + + ip4 := newTunnelEndpoint.As4() + copy(value.VtepIp[:], ip4[:]) + + return value +} + +// OpenPinnedVtepPolicyMap opens an existing pinned IPv4 policy map. +func OpenPinnedVtepPolicyMap(logger *slog.Logger) (*VtepPolicyMap, error) { + m, err := bpf.OpenMap(bpf.MapPath(logger, VtepPolicyMapName), &VtepPolicyKey{}, &VtepPolicyVal{}) + if err != nil { + return nil, err + } + + return &VtepPolicyMap{m}, nil +} + +// Function to update vtep map with VTEP CIDR +func (m *VtepPolicyMap) UpdateVtepPolicyMapping(srcIP netip.Addr, dstCIDR netip.Prefix, newTunnelEndpoint netip.Addr, vtepMAC mac.MAC) error { + key := NewKey(srcIP, dstCIDR) + value := NewVal(newTunnelEndpoint, vtepMAC) + + return m.m.Update(&key, &value) +} + +func (m *VtepPolicyMap) RemoveVtepPolicyMapping(srcIP netip.Addr, dstCIDR netip.Prefix) error { + key := NewKey(srcIP, dstCIDR) + return m.m.Delete(&key) +} + +func (m *VtepPolicyMap) Delete(key *VtepPolicyKey) error { + return m.m.Delete(key) +} + +func (m *VtepPolicyMap) Lookup(key *VtepPolicyKey) (*VtepPolicyVal, error) { + ret, err := m.m.Lookup(key) + if err != nil { + return nil, err + } + return ret.(*VtepPolicyVal), err +} + +// VtepPolicyIterateCallback represents the signature of the callback function +// expected by the IterateWithCallback method, which in turn is used to iterate +// all the keys/values of an vtep policy map. +type VtepPolicyIterateCallback func(*VtepPolicyKey, *VtepPolicyVal) + +// IterateWithCallback iterates through all the keys/values of an vtep policy +// map, passing each key/value pair to the cb callback. +func (m *VtepPolicyMap) IterateWithCallback(cb VtepPolicyIterateCallback) error { + return m.m.DumpWithCallback(func(k bpf.MapKey, v bpf.MapValue) { + key := k.(*VtepPolicyKey) + value := v.(*VtepPolicyVal) + + cb(key, value) + }) +} + +func (k *VtepPolicyKey) Match(ip netip.Addr, destCIDR netip.Prefix) bool { + nkey := NewKey(ip, destCIDR) + return nkey == *k +} + +func (v *VtepPolicyVal) Match(vtepIP netip.Addr, rmac mac.MAC) bool { + nval := NewVal(vtepIP, rmac) + return nval == *v +} diff --git a/pkg/monitor/datapath_drop.go b/pkg/monitor/datapath_drop.go index d244a0f82ba41..a1e34ceed595b 100644 --- a/pkg/monitor/datapath_drop.go +++ b/pkg/monitor/datapath_drop.go @@ -17,6 +17,7 @@ const ( DropNotifyVersion0 = iota DropNotifyVersion1 DropNotifyVersion2 + DropNotifyVersion3 ) const ( @@ -24,6 +25,8 @@ const ( dropNotifyV1Len = 36 // dropNotifyV2Len is the amount of packet data provided in a v2 drop notification. 
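A hedged sketch of how the new map helpers might be exercised from a test follows; obtaining the `*VtepPolicyMap` itself (via `CreatePrivatePolicyMap` wired through a test hive and lifecycle) is elided, the addresses and MAC are arbitrary, and the helper name `exerciseMap` is illustrative:

```go
package vtep_policy_test

import (
	"net/netip"
	"testing"

	"github.com/cilium/cilium/pkg/mac"
	"github.com/cilium/cilium/pkg/maps/vtep_policy"
)

// exerciseMap shows the intended call pattern for the new helpers; in a real
// test the map would come from CreatePrivatePolicyMap via a test hive.
func exerciseMap(t *testing.T, m *vtep_policy.VtepPolicyMap) {
	src := netip.MustParseAddr("10.0.0.5")
	dst := netip.MustParsePrefix("192.168.0.0/16")
	vtepIP := netip.MustParseAddr("172.16.0.1")
	vtepMAC, err := mac.ParseMAC("aa:bb:cc:dd:ee:ff")
	if err != nil {
		t.Fatal(err)
	}

	// Insert the mapping, look it back up with the same key, then remove it.
	if err := m.UpdateVtepPolicyMapping(src, dst, vtepIP, vtepMAC); err != nil {
		t.Fatal(err)
	}
	key := vtep_policy.NewKey(src, dst)
	val, err := m.Lookup(&key)
	if err != nil {
		t.Fatal(err)
	}
	if !val.Match(vtepIP, vtepMAC) {
		t.Fatalf("unexpected value: %s", val)
	}
	if err := m.RemoveVtepPolicyMapping(src, dst); err != nil {
		t.Fatal(err)
	}
}
```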
dropNotifyV2Len = 40 + // dropNotifyV3Len is the amount of packet data provided in a v3 drop notification. + dropNotifyV3Len = 48 ) const ( @@ -42,27 +45,29 @@ var ( DropNotifyVersion0: dropNotifyV1Len, // retain backwards compatibility for testing. DropNotifyVersion1: dropNotifyV1Len, DropNotifyVersion2: dropNotifyV2Len, + DropNotifyVersion3: dropNotifyV3Len, } ) // DropNotify is the message format of a drop notification in the BPF ring buffer type DropNotify struct { - Type uint8 - SubType uint8 - Source uint16 - Hash uint32 - OrigLen uint32 - CapLen uint16 - Version uint16 - SrcLabel identity.NumericIdentity - DstLabel identity.NumericIdentity - DstID uint32 - Line uint16 - File uint8 - ExtError int8 - Ifindex uint32 - Flags uint8 - _ [3]uint8 + Type uint8 + SubType uint8 + Source uint16 + Hash uint32 + OrigLen uint32 + CapLen uint16 + Version uint16 + SrcLabel identity.NumericIdentity + DstLabel identity.NumericIdentity + DstID uint32 + Line uint16 + File uint8 + ExtError int8 + Ifindex uint32 + Flags uint8 + _ [3]uint8 + IPTraceID uint64 // data } @@ -108,7 +113,7 @@ func (n *DropNotify) Decode(data []byte) error { version := byteorder.Native.Uint16(data[14:16]) // Check against max version. - if version > DropNotifyVersion2 { + if version > DropNotifyVersion3 { return fmt.Errorf("Unrecognized drop event (version %d)", version) } @@ -120,6 +125,13 @@ func (n *DropNotify) Decode(data []byte) error { n.Flags = data[36] } + if version >= DropNotifyVersion3 { + if l := len(data); l < dropNotifyV3Len { + return fmt.Errorf("unexpected DropNotify data length (version %d), expected at least %d but got %d", version, dropNotifyV3Len, l) + } + n.IPTraceID = byteorder.Native.Uint64(data[40:48]) + } + // Decode logic for version >= v0/v1. n.Type = data[0] n.SubType = data[1] @@ -171,6 +183,9 @@ func (n *DropNotify) DataOffset() uint { func (n *DropNotify) DumpInfo(buf *bufio.Writer, data []byte, numeric api.DisplayFormat) { fmt.Fprintf(buf, "xx drop (%s) flow %#x to endpoint %d, ifindex %d, file %s:%d, ", api.DropReasonExt(n.SubType, n.ExtError), n.Hash, n.DstID, n.Ifindex, api.BPFFileName(n.File), int(n.Line)) + if id := n.IPTraceID; id > 0 { + fmt.Fprintf(buf, " [ ip-trace-id = %d ]", id) + } n.dumpIdentity(buf, numeric) fmt.Fprintf(buf, ": %s\n", GetConnectionSummary(data[n.DataOffset():], &decodeOpts{n.IsL3Device(), n.IsIPv6(), n.IsVXLAN(), n.IsGeneve()})) } @@ -190,13 +205,16 @@ func (n *DropNotify) DumpVerbose(buf *bufio.Writer, dissect bool, data []byte, p fmt.Fprintf(buf, "\n") } + if id := n.IPTraceID; id > 0 { + fmt.Fprintf(buf, " [ IP-TRACE-ID=%d", id) + } + if offset := int(n.DataOffset()); n.CapLen > 0 && len(data) > offset { Dissect(buf, dissect, data[offset:], &decodeOpts{n.IsL3Device(), n.IsIPv6(), n.IsVXLAN(), n.IsGeneve()}) } } func (n *DropNotify) getJSON(data []byte, cpuPrefix string) (string, error) { - v := DropNotifyToVerbose(n) v.CPUPrefix = cpuPrefix if offset := int(n.DataOffset()); n.CapLen > 0 && len(data) > offset { @@ -222,15 +240,16 @@ type DropNotifyVerbose struct { Mark string `json:"mark,omitempty"` Reason string `json:"reason,omitempty"` - Source uint16 `json:"source"` - Bytes uint32 `json:"bytes"` - SrcLabel identity.NumericIdentity `json:"srcLabel"` - DstLabel identity.NumericIdentity `json:"dstLabel"` - DstID uint32 `json:"dstID"` - Line uint16 `json:"Line"` - File uint8 `json:"File"` - ExtError int8 `json:"ExtError"` - Ifindex uint32 `json:"Ifindex"` + Source uint16 `json:"source"` + Bytes uint32 `json:"bytes"` + SrcLabel identity.NumericIdentity 
`json:"srcLabel"` + DstLabel identity.NumericIdentity `json:"dstLabel"` + DstID uint32 `json:"dstID"` + Line uint16 `json:"Line"` + File uint8 `json:"File"` + ExtError int8 `json:"ExtError"` + Ifindex uint32 `json:"Ifindex"` + IPTraceID uint64 `json:"IPTraceID,omitempty"` Summary *DissectSummary `json:"summary,omitempty"` } @@ -238,17 +257,18 @@ type DropNotifyVerbose struct { // DropNotifyToVerbose creates verbose notification from DropNotify func DropNotifyToVerbose(n *DropNotify) DropNotifyVerbose { return DropNotifyVerbose{ - Type: "drop", - Mark: fmt.Sprintf("%#x", n.Hash), - Reason: api.DropReasonExt(n.SubType, n.ExtError), - Source: n.Source, - Bytes: n.OrigLen, - SrcLabel: n.SrcLabel, - DstLabel: n.DstLabel, - DstID: n.DstID, - Line: n.Line, - File: n.File, - ExtError: n.ExtError, - Ifindex: n.Ifindex, + Type: "drop", + Mark: fmt.Sprintf("%#x", n.Hash), + Reason: api.DropReasonExt(n.SubType, n.ExtError), + Source: n.Source, + Bytes: n.OrigLen, + SrcLabel: n.SrcLabel, + DstLabel: n.DstLabel, + DstID: n.DstID, + Line: n.Line, + File: n.File, + ExtError: n.ExtError, + Ifindex: n.Ifindex, + IPTraceID: n.IPTraceID, } } diff --git a/pkg/monitor/datapath_drop_test.go b/pkg/monitor/datapath_drop_test.go index c1ab8b5d9373a..1f3f630e3f83d 100644 --- a/pkg/monitor/datapath_drop_test.go +++ b/pkg/monitor/datapath_drop_test.go @@ -8,76 +8,205 @@ import ( "encoding/binary" "testing" + "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/require" "github.com/cilium/cilium/pkg/byteorder" ) -func TestDecodeDropNotify(t *testing.T) { +func TestDropNotifyV1_Decode(t *testing.T) { // This check on the struct length constant is there to ensure that this // test is updated when the struct changes. - require.Equal(t, 40, dropNotifyV2Len) + require.Equal(t, 36, dropNotifyV1Len) + + testCases := []struct { + name string + input DropNotify + }{ + { + name: "empty", + }, + { + name: "arbitrary", + input: DropNotify{ + Type: 0x00, + SubType: 0x01, + Source: 0x02_03, + Hash: 0x04_05_06_07, + OrigLen: 0x08_09_0a_0b, + CapLen: 0x0e_10, + Version: 0x00_01, + SrcLabel: 0x11_12_13_14, + DstLabel: 0x15_16_17_18, + DstID: 0x19_1a_1b_1c, + Line: 0x1d_1e, + File: 0x20, + ExtError: 0x21, + Ifindex: 0x22_23_24_25, + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + buf := bytes.NewBuffer(nil) + if err := binary.Write(buf, byteorder.Native, tc.input); err != nil { + t.Fatalf("Unexpected error from Write(...); got: %v", err) + } + + output := DropNotify{} + if err := output.Decode(buf.Bytes()); err != nil { + t.Fatalf("Unexpected error from Decode(); got: %v", err) + } + + if diff := cmp.Diff(tc.input, output); diff != "" { + t.Errorf("Unexpected diff (-want +got):\n%s", diff) + } + }) + } +} - input := DropNotify{ - Type: 0x00, - SubType: 0x01, - Source: 0x02_03, - Hash: 0x04_05_06_07, - OrigLen: 0x08_09_0a_0b, - CapLen: 0x0c_0d, - Version: 0x02, - SrcLabel: 0x11_12_13_14, - DstLabel: 0x15_16_17_18, - DstID: 0x19_1a_1b_1c, - Line: 0x1d_1e, - File: 0x20, - ExtError: 0x21, - Ifindex: 0x22_23_24_25, - Flags: 0x0f, +func TestDropNotify_Decode(t *testing.T) { + // This check on the struct length constant is there to ensure that this + // test is updated when the struct changes. 
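Version 3 grows the fixed drop-notification header from 40 to 48 bytes, with the extra eight bytes (offsets 40..47) carrying `IPTraceID`. A minimal round-trip sketch in the style of the tests in this file (standalone program, native byte order, no trailing packet data):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"github.com/cilium/cilium/pkg/byteorder"
	"github.com/cilium/cilium/pkg/monitor"
)

func main() {
	in := monitor.DropNotify{Version: monitor.DropNotifyVersion3, IPTraceID: 0x42}

	buf := bytes.NewBuffer(nil)
	if err := binary.Write(buf, byteorder.Native, in); err != nil {
		panic(err)
	}

	var out monitor.DropNotify
	if err := out.Decode(buf.Bytes()); err != nil {
		panic(err)
	}

	// The trace ID survives the round trip, and the packet payload now
	// starts at offset 48 instead of 40.
	fmt.Println(out.IPTraceID, out.DataOffset()) // 66 48
}
```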
+ require.Equal(t, 40, dropNotifyV2Len) + require.Equal(t, 48, dropNotifyV3Len) + + testCases := []struct { + name string + input DropNotify + }{ + { + name: "empty", + }, + { + name: "arbitrary", + input: DropNotify{ + Type: 0x00, + SubType: 0x01, + Source: 0x02_03, + Hash: 0x04_05_06_07, + OrigLen: 0x08_09_0a_0b, + CapLen: 0x0e_10, + Version: 0x00_03, + SrcLabel: 0x11_12_13_14, + DstLabel: 0x15_16_17_18, + DstID: 0x19_1a_1b_1c, + Line: 0x1d_1e, + File: 0x20, + ExtError: 0x21, + Ifindex: 0x22_23_24_25, + Flags: 0x0f, + IPTraceID: 0x99, + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + buf := bytes.NewBuffer(nil) + if err := binary.Write(buf, byteorder.Native, tc.input); err != nil { + t.Fatalf("Unexpected error from Write(...); got: %v", err) + } + + output := DropNotify{} + if err := output.Decode(buf.Bytes()); err != nil { + t.Fatalf("Unexpected error from Decode(); got: %v", err) + } + + if diff := cmp.Diff(tc.input, output); diff != "" { + t.Errorf("Unexpected diff (-want +got):\n%s", diff) + } + }) } - buf := bytes.NewBuffer(nil) - err := binary.Write(buf, byteorder.Native, input) - require.NoError(t, err) - - output := &DropNotify{} - err = output.Decode(buf.Bytes()) - require.NoError(t, err) - - require.Equal(t, input.Type, output.Type) - require.Equal(t, input.SubType, output.SubType) - require.Equal(t, input.Source, output.Source) - require.Equal(t, input.Hash, output.Hash) - require.Equal(t, input.OrigLen, output.OrigLen) - require.Equal(t, input.CapLen, output.CapLen) - require.Equal(t, input.SrcLabel, output.SrcLabel) - require.Equal(t, input.DstLabel, output.DstLabel) - require.Equal(t, input.DstID, output.DstID) - require.Equal(t, input.Line, output.Line) - require.Equal(t, input.File, output.File) - require.Equal(t, input.ExtError, output.ExtError) - require.Equal(t, input.Ifindex, output.Ifindex) - require.Equal(t, input.Flags, output.Flags) - require.True(t, output.IsL3Device()) - require.True(t, output.IsIPv6()) - require.True(t, output.IsVXLAN()) - require.True(t, output.IsGeneve()) } -func TestDecodeDropNotifyErrors(t *testing.T) { - n := DropNotify{} - err := n.Decode([]byte{}) - require.Error(t, err) - require.Equal(t, "unexpected DropNotify data length, expected at least 36 but got 0", err.Error()) - - // invalid version - ev := make([]byte, dropNotifyV2Len) - ev[14] = 0xff - err = n.Decode(ev) - require.Error(t, err) - require.Equal(t, "Unrecognized drop event (version 255)", err.Error()) +func TestDecodeDropNotify(t *testing.T) { + testCases := []struct { + name string + input any + want uint + }{ + { + name: "v1", + input: DropNotify{ + Type: 0x00, + SubType: 0x01, + Source: 0x02_03, + Hash: 0x04_05_06_07, + OrigLen: 0x08_09_0a_0b, + CapLen: 0x0e_10, + Version: 0x00_01, + SrcLabel: 0x11_12_13_14, + DstLabel: 0x15_16_17_18, + DstID: 0x19_1a_1b_1c, + Line: 0x1d_1e, + File: 0x20, + ExtError: 0x21, + Ifindex: 0x22_23_24_25, + }, + want: dropNotifyV1Len, + }, + { + name: "v2", + input: DropNotify{ + Type: 0x00, + SubType: 0x01, + Source: 0x02_03, + Hash: 0x04_05_06_07, + OrigLen: 0x08_09_0a_0b, + CapLen: 0x0e_10, + Version: 0x00_02, + SrcLabel: 0x11_12_13_14, + DstLabel: 0x15_16_17_18, + DstID: 0x19_1a_1b_1c, + Line: 0x1d_1e, + File: 0x20, + ExtError: 0x21, + Ifindex: 0x22_23_24_25, + }, + want: dropNotifyV2Len, + }, + { + name: "with_iptrace", + input: DropNotify{ + Type: 0x00, + SubType: 0x01, + Source: 0x02_03, + Hash: 0x04_05_06_07, + OrigLen: 0x08_09_0a_0b, + CapLen: 0x0e_10, + Version: 0x00_03, + SrcLabel: 0x11_12_13_14, + 
DstLabel: 0x15_16_17_18, + DstID: 0x19_1a_1b_1c, + Line: 0x1d_1e, + File: 0x20, + ExtError: 0x21, + Ifindex: 0x22_23_24_25, + IPTraceID: 0x999, + }, + want: dropNotifyV3Len, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + buf := bytes.NewBuffer(nil) + if err := binary.Write(buf, byteorder.Native, tc.input); err != nil { + t.Fatalf("Unexpected error from Write(...); got: %v", err) + } + + output := DropNotify{} + if err := output.Decode(buf.Bytes()); err != nil { + t.Fatalf("Unexpected error from Decode(); got: %v", err) + } + + if got := output.DataOffset(); got != tc.want { + t.Fatalf("Unexpected DataOffset(); want %d, got %d", tc.want, got) + } + }) + } } -func BenchmarkDecodeDropNotifyV1(b *testing.B) { +func BenchmarkNewDropNotifyV1_Decode(b *testing.B) { input := DropNotify{} buf := bytes.NewBuffer(nil) @@ -95,8 +224,8 @@ func BenchmarkDecodeDropNotifyV1(b *testing.B) { } } -func BenchmarkDecodeDropNotifyV2(b *testing.B) { - input := DropNotify{Version: DropNotifyVersion2} +func BenchmarkOldDropNotifyV1_Decode(b *testing.B) { + input := DropNotify{} buf := bytes.NewBuffer(nil) if err := binary.Write(buf, byteorder.Native, input); err != nil { @@ -107,7 +236,7 @@ func BenchmarkDecodeDropNotifyV2(b *testing.B) { for b.Loop() { dn := &DropNotify{} - if err := dn.Decode(buf.Bytes()); err != nil { + if err := binary.Read(bytes.NewReader(buf.Bytes()), byteorder.Native, dn); err != nil { b.Fatal(err) } } diff --git a/pkg/monitor/datapath_trace.go b/pkg/monitor/datapath_trace.go index f8375eb1e654f..5215cfc40a001 100644 --- a/pkg/monitor/datapath_trace.go +++ b/pkg/monitor/datapath_trace.go @@ -21,6 +21,8 @@ const ( traceNotifyV0Len = 32 // traceNotifyV1Len is the amount of packet data provided in a trace notification v1. traceNotifyV1Len = 48 + // traceNotifyV2Len is the amount of packet data provided in a trace notification v2. + traceNotifyV2Len = 56 ) const ( @@ -41,24 +43,26 @@ const ( const ( TraceNotifyVersion0 = iota TraceNotifyVersion1 + TraceNotifyVersion2 ) // TraceNotify is the message format of a trace notification in the BPF ring buffer type TraceNotify struct { - Type uint8 - ObsPoint uint8 - Source uint16 - Hash uint32 - OrigLen uint32 - CapLen uint16 - Version uint16 - SrcLabel identity.NumericIdentity - DstLabel identity.NumericIdentity - DstID uint16 - Reason uint8 - Flags uint8 - Ifindex uint32 - OrigIP types.IPv6 + Type uint8 + ObsPoint uint8 + Source uint16 + Hash uint32 + OrigLen uint32 + CapLen uint16 + Version uint16 + SrcLabel identity.NumericIdentity + DstLabel identity.NumericIdentity + DstID uint16 + Reason uint8 + Flags uint8 + Ifindex uint32 + OrigIP types.IPv6 + IPTraceID uint64 // data } @@ -94,12 +98,19 @@ func (tn *TraceNotify) Decode(data []byte) error { version := byteorder.Native.Uint16(data[14:16]) // Check against max version. - if version > TraceNotifyVersion1 { + if version > TraceNotifyVersion2 { return fmt.Errorf("Unrecognized trace event (version %d)", version) } // Decode logic for version >= v1. 
- if version >= TraceNotifyVersion1 { + switch version { + case TraceNotifyVersion2: + if l := len(data); l < traceNotifyV2Len { + return fmt.Errorf("unexpected TraceNotify data length (version %d), expected at least %d but got %d", version, traceNotifyV2Len, l) + } + tn.IPTraceID = byteorder.Native.Uint64(data[48:56]) + fallthrough + case TraceNotifyVersion1: if l := len(data); l < traceNotifyV1Len { return fmt.Errorf("unexpected TraceNotify data length (version %d), expected at least %d but got %d", version, traceNotifyV1Len, l) } @@ -172,6 +183,7 @@ var ( traceNotifyLength = map[uint16]uint{ TraceNotifyVersion0: traceNotifyV0Len, TraceNotifyVersion1: traceNotifyV1Len, + TraceNotifyVersion2: traceNotifyV2Len, } ) @@ -316,14 +328,23 @@ func (n *TraceNotify) DumpInfo(buf *bufio.Writer, data []byte, numeric api.Displ } n.dumpIdentity(buf, numeric) ifname := linkMonitor.Name(n.Ifindex) - fmt.Fprintf(buf, " state %s ifindex %s orig-ip %s: %s\n", n.traceReasonString(), - ifname, n.OriginalIP().String(), GetConnectionSummary(data[hdrLen:], &decodeOpts{n.IsL3Device(), n.IsIPv6(), n.IsVXLAN(), n.IsGeneve()})) + + if id := n.IPTraceID; id > 0 { + fmt.Fprintf(buf, " [ ip-trace-id = %d ]", id) + } + fmt.Fprintf(buf, " state %s ifindex %s orig-ip %s: %s\n", + n.traceReasonString(), ifname, n.OriginalIP().String(), GetConnectionSummary(data[hdrLen:], &decodeOpts{n.IsL3Device(), n.IsIPv6(), n.IsVXLAN(), n.IsGeneve()})) + buf.Flush() } // DumpVerbose prints the trace notification in human readable form func (n *TraceNotify) DumpVerbose(buf *bufio.Writer, dissect bool, data []byte, prefix string, numeric api.DisplayFormat, linkMonitor getters.LinkGetter) { - fmt.Fprintf(buf, "%s MARK %#x FROM %d %s: %d bytes (%d captured), state %s", - prefix, n.Hash, n.Source, api.TraceObservationPoint(n.ObsPoint), n.OrigLen, n.CapLen, n.traceReasonString()) + fmt.Fprintf(buf, "%s MARK %#x", prefix, n.Hash) + if id := n.IPTraceID; id > 0 { + fmt.Fprintf(buf, " [ IP-TRACE-ID = %d ]", id) + } + fmt.Fprintf(buf, " FROM %d %s: %d bytes (%d captured), state %s", + n.Source, api.TraceObservationPoint(n.ObsPoint), n.OrigLen, n.CapLen, n.traceReasonString()) if n.Ifindex != 0 { ifname := linkMonitor.Name(n.Ifindex) @@ -383,11 +404,12 @@ type TraceNotifyVerbose struct { ObservationPoint string `json:"observationPoint"` TraceSummary string `json:"traceSummary"` - Source uint16 `json:"source"` - Bytes uint32 `json:"bytes"` - SrcLabel identity.NumericIdentity `json:"srcLabel"` - DstLabel identity.NumericIdentity `json:"dstLabel"` - DstID uint16 `json:"dstID"` + Source uint16 `json:"source"` + Bytes uint32 `json:"bytes"` + SrcLabel identity.NumericIdentity `json:"srcLabel"` + DstLabel identity.NumericIdentity `json:"dstLabel"` + DstID uint16 `json:"dstID"` + IPTraceID uint64 `json:"IpTraceID"` Summary *DissectSummary `json:"summary,omitempty"` } @@ -407,5 +429,6 @@ func TraceNotifyToVerbose(n *TraceNotify, linkMonitor getters.LinkGetter) TraceN SrcLabel: n.SrcLabel, DstLabel: n.DstLabel, DstID: n.DstID, + IPTraceID: n.IPTraceID, } } diff --git a/pkg/monitor/datapath_trace_test.go b/pkg/monitor/datapath_trace_test.go index b502965c12d94..855843e300426 100644 --- a/pkg/monitor/datapath_trace_test.go +++ b/pkg/monitor/datapath_trace_test.go @@ -18,7 +18,7 @@ import ( func TestDecodeTraceNotify(t *testing.T) { // This check on the struct length constant is there to ensure that this // test is updated when the struct changes. 
- require.Equal(t, 48, traceNotifyV1Len) + require.Equal(t, 56, traceNotifyV2Len) in := TraceNotify{ Type: 0x00, @@ -27,20 +27,20 @@ func TestDecodeTraceNotify(t *testing.T) { Hash: 0x05_06_07_08, OrigLen: 0x09_0a_0b_0c, CapLen: 0x0d_0e, - Version: TraceNotifyVersion1, - SrcLabel: identity.NumericIdentity(0x_11_12_13_14), - DstLabel: identity.NumericIdentity(0x_15_16_17_18), + Version: TraceNotifyVersion2, + SrcLabel: identity.NumericIdentity(0x11_12_13_14), + DstLabel: identity.NumericIdentity(0x15_16_17_18), DstID: 0x19_1a, Reason: 0x1b, Flags: 0x1c, Ifindex: 0x1d_1e_1f_20, OrigIP: types.IPv6{ - 0x21, 0x22, - 0x23, 0x24, - 0x25, 0x26, - 0x27, 0x28, - 0x29, 0x2a, + 0x21, 0x22, 0x23, 0x24, + 0x25, 0x26, 0x27, 0x28, + 0x29, 0x2a, 0x2b, 0x2c, + 0x2d, 0x2e, 0x2f, 0x30, }, + IPTraceID: 0x2b_2c_2d_2e_2f_30_31_32, } buf := bytes.NewBuffer(nil) err := binary.Write(buf, byteorder.Native, in) @@ -63,6 +63,7 @@ func TestDecodeTraceNotify(t *testing.T) { require.Equal(t, in.Flags, out.Flags) require.Equal(t, in.Ifindex, out.Ifindex) require.Equal(t, in.OrigIP, out.OrigIP) + require.Equal(t, in.IPTraceID, out.IPTraceID) } func TestDecodeTraceNotifyErrors(t *testing.T) { @@ -330,7 +331,9 @@ func BenchmarkDecodeTraceNotifyVersion0(b *testing.B) { } func BenchmarkDecodeTraceNotifyVersion1(b *testing.B) { - input := TraceNotify{Version: TraceNotifyVersion1} + input := TraceNotify{ + Version: TraceNotifyVersion1, + } buf := bytes.NewBuffer(nil) if err := binary.Write(buf, byteorder.Native, input); err != nil { @@ -340,7 +343,9 @@ func BenchmarkDecodeTraceNotifyVersion1(b *testing.B) { b.ReportAllocs() for b.Loop() { - tn := &TraceNotify{Version: TraceNotifyVersion1} + tn := &TraceNotify{ + Version: TraceNotifyVersion1, + } if err := tn.Decode(buf.Bytes()); err != nil { b.Fatal(err) } diff --git a/pkg/mtu/mtu.go b/pkg/mtu/mtu.go index 3813267979b01..85fd2a58d3ea4 100644 --- a/pkg/mtu/mtu.go +++ b/pkg/mtu/mtu.go @@ -106,25 +106,10 @@ func (c Configuration) Calculate(baseMTU int) RouteMTU { return RouteMTU{ DeviceMTU: c.getDeviceMTU(baseMTU), RouteMTU: c.getRouteMTU(baseMTU), - RoutePostEncryptMTU: c.getRoutePostEncryptMTU(baseMTU), + RoutePostEncryptMTU: c.getDeviceMTU(baseMTU), } } -// GetRoutePostEncryptMTU return the MTU to be used on the encryption routing -// table. This is the MTU without encryption overhead and in the tunnel -// case accounts for the tunnel overhead. -func (c *Configuration) getRoutePostEncryptMTU(baseMTU int) int { - if c.encapEnabled { - postEncryptMTU := baseMTU - c.tunnelOverhead - if postEncryptMTU == 0 { - return EthernetMTU - c.tunnelOverhead - } - return postEncryptMTU - - } - return c.getDeviceMTU(baseMTU) -} - // GetRouteMTU returns the MTU to be used on the network. When running in // tunneling mode and/or with encryption enabled, this will have tunnel and // encryption overhead accounted for. 
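A minimal sketch (not part of the patch; it only restates the mtu.go hunk above and assumes it sits alongside the Configuration, RouteMTU and helper methods in pkg/mtu/mtu.go): with getRoutePostEncryptMTU removed, Calculate now fills RoutePostEncryptMTU with the plain device MTU instead of subtracting the tunnel overhead.

    // Sketch only, assuming the unexported helpers of pkg/mtu.
    func (c Configuration) calculateSketch(baseMTU int) RouteMTU {
        return RouteMTU{
            DeviceMTU: c.getDeviceMTU(baseMTU),
            RouteMTU:  c.getRouteMTU(baseMTU),
            // Tunnel overhead is no longer subtracted for post-encrypt routes.
            RoutePostEncryptMTU: c.getDeviceMTU(baseMTU),
        }
    }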
diff --git a/pkg/node/local_node_store.go b/pkg/node/local_node_store.go index 854f880d29a5f..85d66db1af022 100644 --- a/pkg/node/local_node_store.go +++ b/pkg/node/local_node_store.go @@ -11,6 +11,7 @@ import ( "github.com/cilium/hive/job" "github.com/cilium/statedb" + cmtypes "github.com/cilium/cilium/pkg/clustermesh/types" "github.com/cilium/cilium/pkg/node/types" "github.com/cilium/cilium/pkg/rate" "github.com/cilium/cilium/pkg/source" @@ -43,12 +44,13 @@ var LocalNodeStoreCell = cell.Module( type LocalNodeStoreParams struct { cell.In - Logger *slog.Logger - Lifecycle cell.Lifecycle - Sync LocalNodeSynchronizer - DB *statedb.DB - Nodes statedb.RWTable[*LocalNode] - Jobs job.Group + Logger *slog.Logger + Lifecycle cell.Lifecycle + Sync LocalNodeSynchronizer + DB *statedb.DB + Nodes statedb.RWTable[*LocalNode] + Jobs job.Group + ClusterInfo cmtypes.ClusterInfo } // LocalNodeStore is the canonical owner for the local node object and provides @@ -69,7 +71,9 @@ func NewLocalNodeStore(params LocalNodeStoreParams) (*LocalNodeStore, error) { params.Nodes.Insert(wtxn, &LocalNode{ Node: types.Node{ - Name: types.GetName(), + Name: types.GetName(), + Cluster: params.ClusterInfo.Name, + ClusterID: params.ClusterInfo.ID, // Explicitly initialize the labels and annotations maps, so that // we don't need to always check for nil values. Labels: make(map[string]string), @@ -86,6 +90,9 @@ func NewLocalNodeStore(params LocalNodeStoreParams) (*LocalNodeStore, error) { OnStart: func(ctx cell.HookContext) error { wtxn := params.DB.WriteTxn(params.Nodes) n, _, _ := params.Nodes.Get(wtxn, LocalNodeQuery) + // Delete the initial one as name might change. + params.Nodes.Delete(wtxn, n) + n = n.DeepCopy() err := params.Sync.InitLocalNode(ctx, n) params.Nodes.Insert(wtxn, n) @@ -179,17 +186,30 @@ func (s *LocalNodeStore) Get(ctx context.Context) (LocalNode, error) { // Update modifies the local node with a mutator. func (s *LocalNodeStore) Update(update func(*LocalNode)) { txn := s.db.WriteTxn(s.nodes) - defer txn.Commit() + defer txn.Abort() ln, _, found := s.nodes.Get(txn, LocalNodeQuery) if !found { panic("BUG: No local node exists") } + orig := ln ln = ln.DeepCopy() update(ln) if ln.Local == nil { panic("BUG: Updated LocalNode has nil Local") } + + if ln.DeepEqual(orig) { + // No changes. + return + } + + if orig.Fullname() != ln.Fullname() { + // Name or cluster has changed, delete first to remove it from the name index. + s.nodes.Delete(txn, orig) + } + s.nodes.Insert(txn, ln) + txn.Commit() } func NewTestLocalNodeStore(mockNode LocalNode) *LocalNodeStore { diff --git a/pkg/node/local_node_store_test.go b/pkg/node/local_node_store_test.go index b6ba849f52610..a918f91bfce2f 100644 --- a/pkg/node/local_node_store_test.go +++ b/pkg/node/local_node_store_test.go @@ -13,6 +13,7 @@ import ( "github.com/cilium/hive/hivetest" "github.com/stretchr/testify/assert" + cmtypes "github.com/cilium/cilium/pkg/clustermesh/types" "github.com/cilium/cilium/pkg/hive" . 
"github.com/cilium/cilium/pkg/node" ) @@ -78,6 +79,12 @@ func TestLocalNodeStore(t *testing.T) { hive := hive.New( LocalNodeStoreCell, + cell.Provide(func() cmtypes.ClusterInfo { + return cmtypes.ClusterInfo{ + Name: "test", + ID: 1, + } + }), cell.Provide(func() LocalNodeSynchronizer { return ts }), cell.Invoke(observe), cell.Invoke(update), diff --git a/pkg/node/manager/manager_test.go b/pkg/node/manager/manager_test.go index 1af66dbe977de..4147ddd1302fa 100644 --- a/pkg/node/manager/manager_test.go +++ b/pkg/node/manager/manager_test.go @@ -4,6 +4,7 @@ package manager import ( + "bytes" "context" "encoding/json" "fmt" @@ -1427,20 +1428,22 @@ func TestNodesStartupPruning(t *testing.T) { checkNodeFileMatches := func(t *testing.T, stateDir string, nodes ...nodeTypes.Node) { path := filepath.Join(stateDir, nodesFilename) - // Wait until the file exists. The node deletion triggers the write, hence - // this shouldn't take long. + var prevBytes []byte + require.EventuallyWithT(t, func(c *assert.CollectT) { - assert.FileExists(c, path) - }, time.Second, 10*time.Millisecond) - nwf, err := os.Open(path) - require.NoError(t, err) - t.Cleanup(func() { - nwf.Close() - }) - var nl []nodeTypes.Node - assert.NoError(t, json.NewDecoder(nwf).Decode(&nl)) - assert.ElementsMatch(t, nodes, nl) - require.NoError(t, os.Remove(path)) + checkpointBytes, err := os.ReadFile(path) + assert.NoError(c, err) + + if bytes.Equal(checkpointBytes, prevBytes) { + c.FailNow() + } + + prevBytes = checkpointBytes + + var nl []nodeTypes.Node + assert.NoError(c, json.Unmarshal(checkpointBytes, &nl)) + assert.ElementsMatch(c, nodes, nl) + }, time.Second*2, 100*time.Millisecond) } t.Run("cluster nodes synced first", func(t *testing.T) { diff --git a/pkg/option/config.go b/pkg/option/config.go index 362115a2bfc7b..d0ac87ff5e313 100644 --- a/pkg/option/config.go +++ b/pkg/option/config.go @@ -998,6 +998,9 @@ const ( // EnableExtendedIPProtocols controls whether traffic with extended IP protocols is supported in datapath. EnableExtendedIPProtocols = "enable-extended-ip-protocols" + + // IPTracingOptionType specifies what IPv4 option type should be used to extract trace information from a packet + IPTracingOptionType = "ip-tracing-option-type" ) // Default string arguments @@ -1889,6 +1892,9 @@ type DaemonConfig struct { // EnableExtendedIPProtocols controls whether traffic with extended IP protocols is supported in datapath EnableExtendedIPProtocols bool + + // IPTracingOptionType determines whether to enable IP tracing, and if enabled what option type to use. 
+ IPTracingOptionType uint } var ( @@ -1947,6 +1953,8 @@ var ( EnableSourceIPVerification: defaults.EnableSourceIPVerification, ConnectivityProbeFrequencyRatio: defaults.ConnectivityProbeFrequencyRatio, + + IPTracingOptionType: defaults.IPTracingOptionType, } ) @@ -2580,6 +2588,7 @@ func (c *DaemonConfig) Populate(logger *slog.Logger, vp *viper.Viper) { c.BPFConntrackAccounting = vp.GetBool(BPFConntrackAccounting) c.BootIDFile = vp.GetString(BootIDFilename) c.EnableExtendedIPProtocols = vp.GetBool(EnableExtendedIPProtocols) + c.IPTracingOptionType = vp.GetUint(IPTracingOptionType) c.ServiceNoBackendResponse = vp.GetString(ServiceNoBackendResponse) switch c.ServiceNoBackendResponse { @@ -3224,8 +3233,7 @@ func (c *DaemonConfig) validateVTEP(vp *viper.Viper) error { vtepCidrMask := vp.GetString(VtepMask) vtepMACs := vp.GetStringSlice(VtepMAC) - if (len(vtepEndpoints) < 1) || - len(vtepEndpoints) != len(vtepCIDRs) || + if len(vtepEndpoints) != len(vtepCIDRs) || len(vtepEndpoints) != len(vtepMACs) { return fmt.Errorf("VTEP configuration must have the same number of Endpoint, VTEP and MAC configurations (Found %d endpoints, %d MACs, %d CIDR ranges)", len(vtepEndpoints), len(vtepMACs), len(vtepCIDRs)) } diff --git a/pkg/policy/l4.go b/pkg/policy/l4.go index 652cd07c9f1c2..a25253014aed0 100644 --- a/pkg/policy/l4.go +++ b/pkg/policy/l4.go @@ -224,14 +224,6 @@ func getAuthType(auth *api.Authentication) (bool, AuthType) { } } -// getAuthType returns the AuthType of the L4Filter. -func (a *PerSelectorPolicy) getAuthType() (bool, AuthType) { - if a == nil { - return false, types.AuthTypeDisabled - } - return getAuthType(a.Authentication) -} - // GetAuthRequirement returns the AuthRequirement of the L4Filter. func (a *PerSelectorPolicy) getAuthRequirement() AuthRequirement { if a == nil { @@ -601,15 +593,6 @@ type ChangeState struct { old mapStateMap // Old values of all modified or deleted keys, if not nil } -// NewRevertState returns an empty ChangeState suitable for reverting MapState changes. -// The private 'old' field is initialized so that old state can be restored if need be. 
-func NewRevertState() ChangeState { - return ChangeState{ - Adds: make(Keys), - old: make(mapStateMap), - } -} - func (c *ChangeState) Empty() bool { return len(c.Adds)+len(c.Deletes)+len(c.old) == 0 } diff --git a/pkg/policy/l4_test.go b/pkg/policy/l4_test.go index e4455f0327a21..bdee6901086d4 100644 --- a/pkg/policy/l4_test.go +++ b/pkg/policy/l4_test.go @@ -163,7 +163,7 @@ func TestCreateL4Filter(t *testing.T) { require.NoError(t, err) require.Len(t, filter.PerSelectorPolicies, 1) for _, sp := range filter.PerSelectorPolicies { - explicit, authType := sp.getAuthType() + explicit, authType := getAuthType(sp.Authentication) require.False(t, explicit) require.Equal(t, AuthTypeDisabled, authType) require.Equal(t, redirectTypeEnvoy, sp.redirectType()) @@ -173,7 +173,7 @@ func TestCreateL4Filter(t *testing.T) { require.NoError(t, err) require.Len(t, filter.PerSelectorPolicies, 1) for _, sp := range filter.PerSelectorPolicies { - explicit, authType := sp.getAuthType() + explicit, authType := getAuthType(sp.Authentication) require.False(t, explicit) require.Equal(t, AuthTypeDisabled, authType) require.Equal(t, redirectTypeEnvoy, sp.redirectType()) @@ -207,7 +207,7 @@ func TestCreateL4FilterAuthRequired(t *testing.T) { require.NoError(t, err) require.Len(t, filter.PerSelectorPolicies, 1) for _, sp := range filter.PerSelectorPolicies { - explicit, authType := sp.getAuthType() + explicit, authType := getAuthType(sp.Authentication) require.True(t, explicit) require.Equal(t, AuthTypeDisabled, authType) require.Equal(t, redirectTypeEnvoy, sp.redirectType()) @@ -217,7 +217,7 @@ func TestCreateL4FilterAuthRequired(t *testing.T) { require.NoError(t, err) require.Len(t, filter.PerSelectorPolicies, 1) for _, sp := range filter.PerSelectorPolicies { - explicit, authType := sp.getAuthType() + explicit, authType := getAuthType(sp.Authentication) require.True(t, explicit) require.Equal(t, AuthTypeDisabled, authType) require.Equal(t, redirectTypeEnvoy, sp.redirectType()) diff --git a/pkg/policy/origin.go b/pkg/policy/origin.go index e15cf5324ea5f..c5863efad2e37 100644 --- a/pkg/policy/origin.go +++ b/pkg/policy/origin.go @@ -130,16 +130,6 @@ func (ro ruleOrigin) Merge(other ruleOrigin) ruleOrigin { var NilRuleOrigin = newRuleOrigin(RuleMeta{labels: "[]"}) -type testOrigin map[CachedSelector]labels.LabelArrayList - -func OriginForTest(m testOrigin) map[CachedSelector]ruleOrigin { - res := make(map[CachedSelector]ruleOrigin, len(m)) - for cs, lbls := range m { - res[cs] = makeRuleOrigin(lbls, nil) - } - return res -} - // stringLabels is an interned labels.LabelArray.String() type stringLabels unique.Handle[labels.LabelArrayListString] diff --git a/pkg/policy/origin_test.go b/pkg/policy/origin_test.go index d56ba169a4e9f..5d350ff2042e7 100644 --- a/pkg/policy/origin_test.go +++ b/pkg/policy/origin_test.go @@ -11,6 +11,14 @@ import ( "github.com/cilium/cilium/pkg/labels" ) +func OriginForTest(m map[CachedSelector]labels.LabelArrayList) map[CachedSelector]ruleOrigin { + res := make(map[CachedSelector]ruleOrigin, len(m)) + for cs, lbls := range m { + res[cs] = makeRuleOrigin(lbls, nil) + } + return res +} + func TestRuleOrigin(t *testing.T) { lbls1 := labels.NewLabelsFromSortedList("k8s:a=1;k8s:b=1").LabelArray() lbls2 := labels.NewLabelsFromSortedList("k8s:a=2;k8s:b=2").LabelArray() diff --git a/pkg/policy/proxyid.go b/pkg/policy/proxyid.go index df9af6b148ff9..efd18904a80b1 100644 --- a/pkg/policy/proxyid.go +++ b/pkg/policy/proxyid.go @@ -7,9 +7,6 @@ import ( "fmt" "strconv" "strings" - - 
"github.com/cilium/cilium/pkg/policy/trafficdirection" - "github.com/cilium/cilium/pkg/u8proto" ) // ProxyStatsKey returns a key for endpoint's proxy stats, which may aggregate stats from multiple @@ -59,11 +56,6 @@ func ProxyID(endpointID uint16, ingress bool, protocol string, port uint16, list return str.String() } -// ProxyIDFromKey returns a unique string to identify a proxy mapping. -func ProxyIDFromKey(endpointID uint16, key Key, listener string) string { - return ProxyID(endpointID, key.TrafficDirection() == trafficdirection.Ingress, u8proto.U8proto(key.Nexthdr).String(), key.DestPort, listener) -} - // ParseProxyID parses a proxy ID returned by ProxyID and returns its components. func ParseProxyID(proxyID string) (endpointID uint16, ingress bool, protocol string, port uint16, listener string, err error) { comps := strings.Split(proxyID, ":") diff --git a/pkg/shell/client/shell_client.go b/pkg/shell/client/shell_client.go index 1fcc774998865..91964ae980280 100644 --- a/pkg/shell/client/shell_client.go +++ b/pkg/shell/client/shell_client.go @@ -17,11 +17,13 @@ import ( "github.com/spf13/cobra" "golang.org/x/term" - "github.com/cilium/cilium/pkg/defaults" + baseshell "github.com/cilium/cilium/pkg/shell" "github.com/cilium/cilium/pkg/time" "github.com/cilium/cilium/pkg/version" ) +var config = baseshell.DefaultConfig + var ShellCmd = &cobra.Command{ Use: "shell [command] [args]...", Short: "Connect to the Cilium shell", @@ -43,7 +45,7 @@ func dialShell(w io.Writer) (net.Conn, error) { for { var err error var d net.Dialer - conn, err = d.DialContext(ctx, "unix", defaults.ShellSockPath) + conn, err = d.DialContext(ctx, "unix", config.ShellSockPath) if err == nil { break } @@ -225,3 +227,8 @@ func printShellGreeting(term *term.Terminal) { fmt.Fprint(term, Blue+Blue+Blue+" \\__/"+Reset+"\n") fmt.Fprint(term, "\n") } + +// AddShellSockOption adds the --shell-sock-path to the command. +func AddShellSockOption(cmd *cobra.Command) { + config.Flags(cmd.Flags()) +} diff --git a/pkg/shell/config.go b/pkg/shell/config.go new file mode 100644 index 0000000000000..b78e55d6e2dba --- /dev/null +++ b/pkg/shell/config.go @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package shell + +import ( + "github.com/spf13/pflag" + + "github.com/cilium/cilium/pkg/defaults" +) + +const ShellSockPathName = "shell-sock-path" + +var DefaultConfig = Config{ShellSockPath: defaults.ShellSockPath} + +// Config is the configuration for the shell server. 
+type Config struct { + ShellSockPath string +} + +func (def Config) Flags(flags *pflag.FlagSet) { + flags.String(ShellSockPathName, def.ShellSockPath, "Path to the shell UNIX socket") +} diff --git a/pkg/shell/server/shell_server.go b/pkg/shell/server/shell_server.go index c64f44eebd99b..7e6ba7c279d3b 100644 --- a/pkg/shell/server/shell_server.go +++ b/pkg/shell/server/shell_server.go @@ -21,11 +21,14 @@ import ( "github.com/cilium/cilium/pkg/defaults" "github.com/cilium/cilium/pkg/logging/logfields" + baseshell "github.com/cilium/cilium/pkg/shell" ) var Cell = cell.Module( "shell", "Cilium debug shell", + + cell.Config(baseshell.DefaultConfig), cell.Invoke(registerShell), ) @@ -36,7 +39,7 @@ var defaultCmdsToInclude = []string{ "cat", "exec", "help", } -func registerShell(in hive.ScriptCmds, log *slog.Logger, jg job.Group) { +func registerShell(in hive.ScriptCmds, log *slog.Logger, jg job.Group, c baseshell.Config) { cmds := in.Map() defCmds := script.DefaultCmds() for _, name := range defaultCmdsToInclude { @@ -46,18 +49,19 @@ func registerShell(in hive.ScriptCmds, log *slog.Logger, jg job.Group) { Cmds: cmds, Conds: nil, } - jg.Add(job.OneShot("listener", shell{jg, log, &e}.listener)) + jg.Add(job.OneShot("listener", shell{jg, log, &e, c}.listener)) } type shell struct { jg job.Group log *slog.Logger engine *script.Engine + config baseshell.Config } func (sh shell) listener(ctx context.Context, health cell.Health) error { // Remove any old UNIX sock file from previous runs. - os.Remove(defaults.ShellSockPath) + os.Remove(sh.config.ShellSockPath) if _, err := os.Stat(defaults.RuntimePath); os.IsNotExist(err) { if err := os.MkdirAll(defaults.RuntimePath, defaults.RuntimePathRights); err != nil { @@ -66,9 +70,9 @@ func (sh shell) listener(ctx context.Context, health cell.Health) error { } var lc net.ListenConfig - l, err := lc.Listen(ctx, "unix", defaults.ShellSockPath) + l, err := lc.Listen(ctx, "unix", sh.config.ShellSockPath) if err != nil { - return fmt.Errorf("failed to listen on %q: %w", defaults.ShellSockPath, err) + return fmt.Errorf("failed to listen on %q: %w", sh.config.ShellSockPath, err) } var wg sync.WaitGroup @@ -80,7 +84,7 @@ func (sh shell) listener(ctx context.Context, health cell.Health) error { }() defer wg.Wait() - health.OK(fmt.Sprintf("Listening on %s", defaults.ShellSockPath)) + health.OK(fmt.Sprintf("Listening on %s", sh.config.ShellSockPath)) connCount := 0 for ctx.Err() == nil { conn, err := l.Accept() diff --git a/pkg/socketlb/socketlb.go b/pkg/socketlb/socketlb.go index 379aea7cc69fd..09da470bcbe0a 100644 --- a/pkg/socketlb/socketlb.go +++ b/pkg/socketlb/socketlb.go @@ -14,8 +14,9 @@ import ( "github.com/cilium/cilium/pkg/bpf" "github.com/cilium/cilium/pkg/cgroups" + "github.com/cilium/cilium/pkg/datapath/config" "github.com/cilium/cilium/pkg/datapath/linux/sysctl" - "github.com/cilium/cilium/pkg/kpr" + datapath "github.com/cilium/cilium/pkg/datapath/types" "github.com/cilium/cilium/pkg/option" ) @@ -56,20 +57,24 @@ func cgroupLinkPath() string { // options have changed. // It expects bpf_sock.c to be compiled previously, so that bpf_sock.o is present // in the Runtime dir. 
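+// The LocalNodeConfiguration parameter supplies both the KPR configuration and the service-route options used below.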
-func Enable(logger *slog.Logger, sysctl sysctl.Sysctl, kprCfg kpr.KPRConfig) error { +func Enable(logger *slog.Logger, sysctl sysctl.Sysctl, lnc *datapath.LocalNodeConfiguration) error { if err := os.MkdirAll(cgroupLinkPath(), 0777); err != nil { return fmt.Errorf("create bpffs link directory: %w", err) } - spec, err := bpf.LoadCollectionSpec(logger, filepath.Join(option.Config.StateDir, "bpf_sock.o")) + spec, err := ebpf.LoadCollectionSpec(filepath.Join(option.Config.StateDir, "bpf_sock.o")) if err != nil { return fmt.Errorf("failed to load collection spec for bpf_sock.o: %w", err) } + cfg := config.NewBPFSock(config.NodeConfig(lnc)) + cfg.EnableNoServiceEndpointsRoutable = lnc.SvcRouteConfig.EnableNoServiceEndpointsRoutable + coll, commit, err := bpf.LoadCollection(logger, spec, &bpf.CollectionOptions{ CollectionOptions: ebpf.CollectionOptions{ Maps: ebpf.MapOptions{PinPath: bpf.TCGlobalsPath()}, }, + Constants: cfg, }) var ve *ebpf.VerifierError if errors.As(err, &ve) { @@ -97,7 +102,7 @@ func Enable(logger *slog.Logger, sysctl sysctl.Sysctl, kprCfg kpr.KPRConfig) err enabled[GetPeerName4] = true } - if kprCfg.KubeProxyReplacement && option.Config.NodePortBindProtection { + if lnc.KPRConfig.KubeProxyReplacement && option.Config.NodePortBindProtection { enabled[PostBind4] = true } @@ -119,7 +124,7 @@ func Enable(logger *slog.Logger, sysctl sysctl.Sysctl, kprCfg kpr.KPRConfig) err enabled[GetPeerName6] = true } - if kprCfg.KubeProxyReplacement && option.Config.NodePortBindProtection { + if lnc.KPRConfig.KubeProxyReplacement && option.Config.NodePortBindProtection { enabled[PostBind6] = true } diff --git a/pkg/svcrouteconfig/cell.go b/pkg/svcrouteconfig/cell.go new file mode 100644 index 0000000000000..34b732b5a0acc --- /dev/null +++ b/pkg/svcrouteconfig/cell.go @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package svcrouteconfig + +import ( + "github.com/cilium/hive/cell" + "github.com/spf13/pflag" +) + +var Cell = cell.Module( + "service-route-config", + "Service route configuration", + + cell.Config(DefaultConfig), +) + +type RoutesConfig struct { + EnableNoServiceEndpointsRoutable bool +} + +var DefaultConfig = RoutesConfig{ + EnableNoServiceEndpointsRoutable: true, +} + +func (def RoutesConfig) Flags(flags *pflag.FlagSet) { + flags.Bool("enable-no-service-endpoints-routable", def.EnableNoServiceEndpointsRoutable, "Enable routes when service has 0 endpoints") +} diff --git a/pkg/testutils/goleak.go b/pkg/testutils/goleak.go index 3fc44735b0a5f..9402f21cc5b48 100644 --- a/pkg/testutils/goleak.go +++ b/pkg/testutils/goleak.go @@ -22,6 +22,7 @@ func defaultGoleakOptions() []goleak.Option { // Unfortunately we don't have a way for waiting for the workqueue's background goroutine // to exit (used by pkg/k8s/resource), so we'll just need to ignore it. goleak.IgnoreTopFunction("k8s.io/client-go/util/workqueue.(*Typed[...]).updateUnfinishedWorkLoop"), + goleak.IgnoreTopFunction("k8s.io/client-go/util/workqueue.(*delayingType[...]).waitingLoop"), } } diff --git a/pkg/vteppolicy/doc.go b/pkg/vteppolicy/doc.go new file mode 100644 index 0000000000000..c531ebefa32c6 --- /dev/null +++ b/pkg/vteppolicy/doc.go @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Package vteppolicy defines an internal representation of the Cilium VTEP +// Policy. The structures are managed by the Manager. 
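+// Policies are read from the CiliumVtepPolicy custom resource and translated into entries of the vtep policy BPF map.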
+package vteppolicy diff --git a/pkg/vteppolicy/endpoint.go b/pkg/vteppolicy/endpoint.go new file mode 100644 index 0000000000000..61e0fa8281db8 --- /dev/null +++ b/pkg/vteppolicy/endpoint.go @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package vteppolicy + +import ( + "fmt" + "net/netip" + + "k8s.io/apimachinery/pkg/types" + + k8sTypes "github.com/cilium/cilium/pkg/k8s/types" + "github.com/cilium/cilium/pkg/labels" +) + +// endpointMetadata stores relevant metadata associated with a endpoint that's updated during endpoint +// add/update events +type endpointMetadata struct { + // Endpoint labels + labels map[string]string + // Endpoint ID + id endpointID + // ips are endpoint's unique IPs + ips []netip.Addr + // nodeIP is the IP of the node the endpoint is on + nodeIP string +} + +// endpointID is based on endpoint's UID +type endpointID = types.UID + +func getEndpointMetadata(endpoint *k8sTypes.CiliumEndpoint, identityLabels labels.Labels) (*endpointMetadata, error) { + var addrs []netip.Addr + + if endpoint.UID == "" { + // this can happen when CiliumEndpointSlices are in use - which is not supported in the EGW yet + return nil, fmt.Errorf("endpoint has empty UID") + } + + if endpoint.Networking == nil { + return nil, fmt.Errorf("endpoint has no networking metadata") + } + + if len(endpoint.Networking.Addressing) == 0 { + return nil, fmt.Errorf("failed to get valid endpoint IPs") + } + + for _, pair := range endpoint.Networking.Addressing { + if pair.IPV4 != "" { + addr, err := netip.ParseAddr(pair.IPV4) + if err != nil { + continue + } + addrs = append(addrs, addr) + } + } + + data := &endpointMetadata{ + ips: addrs, + labels: identityLabels.K8sStringMap(), + id: endpoint.UID, + nodeIP: endpoint.Networking.NodeIP, + } + + return data, nil +} diff --git a/pkg/vteppolicy/helpers_test.go b/pkg/vteppolicy/helpers_test.go new file mode 100644 index 0000000000000..082c1c2ba7d6e --- /dev/null +++ b/pkg/vteppolicy/helpers_test.go @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package vteppolicy + +import ( + "context" + "errors" + "net/netip" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + + "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + "github.com/cilium/cilium/pkg/k8s/resource" + slimv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" + k8sTypes "github.com/cilium/cilium/pkg/k8s/types" + "github.com/cilium/cilium/pkg/mac" + "github.com/cilium/cilium/pkg/policy/api" +) + +type fakeResource[T runtime.Object] chan resource.Event[T] + +func (fr fakeResource[T]) sync(tb testing.TB) { + var sync resource.Event[T] + sync.Kind = resource.Sync + fr.process(tb, sync) +} + +func (fr fakeResource[T]) process(tb testing.TB, ev resource.Event[T]) { + tb.Helper() + if err := fr.processWithError(ev); err != nil { + tb.Fatal("Failed to process event:", err) + } +} + +func (fr fakeResource[T]) processWithError(ev resource.Event[T]) error { + errs := make(chan error) + ev.Done = func(err error) { + errs <- err + } + fr <- ev + return <-errs +} + +func (fr fakeResource[T]) Observe(ctx context.Context, next func(event resource.Event[T]), complete func(error)) { + complete(errors.New("not implemented")) +} + +func (fr fakeResource[T]) Events(ctx context.Context, opts ...resource.EventsOpt) <-chan resource.Event[T] { + if len(opts) > 1 { + // Ideally we'd only ignore resource.WithRateLimit here, but that + // isn't 
possible. + panic("more than one option is not supported") + } + return fr +} + +func (fr fakeResource[T]) Store(context.Context) (resource.Store[T], error) { + return nil, errors.New("not implemented") +} + +func addPolicy(tb testing.TB, policies fakeResource[*Policy], params *policyParams) { + tb.Helper() + + policy, _ := newCVP(params) + policies.process(tb, resource.Event[*Policy]{ + Kind: resource.Upsert, + Object: policy, + }) +} + +type policyParams struct { + name string + endpointLabels map[string]string + podSelectors map[string]string + destinationCIDRs []string + podLabels map[string]string + vtepIP string + mac string +} + +func newCVP(params *policyParams) (*v2alpha1.CiliumVtepPolicy, *PolicyConfig) { + parsedDestinationCIDRs := make([]netip.Prefix, 0, len(params.destinationCIDRs)) + for _, destCIDR := range params.destinationCIDRs { + parsedDestinationCIDR, _ := netip.ParsePrefix(destCIDR) + parsedDestinationCIDRs = append(parsedDestinationCIDRs, parsedDestinationCIDR) + } + + parsedVtepIp, _ := netip.ParseAddr(params.vtepIP) + parsedMac, _ := mac.ParseMAC(params.mac) + + policy := &PolicyConfig{ + id: types.NamespacedName{ + Name: params.name, + }, + dstCIDRs: parsedDestinationCIDRs, + vtepConfig: vtepConfig{ + vtepIP: parsedVtepIp, + vtepMAC: parsedMac, + }, + } + + if len(params.podSelectors) != 0 { + policy.podSelectors = []api.EndpointSelector{ + { + LabelSelector: &slimv1.LabelSelector{ + MatchLabels: params.podSelectors, + }, + }, + } + } + + // Create destination CIDRs list + var destinationCIDRs []v2alpha1.CIDR + for _, destCIDR := range params.destinationCIDRs { + destinationCIDRs = append(destinationCIDRs, v2alpha1.CIDR(destCIDR)) + } + + cvp := &v2alpha1.CiliumVtepPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: params.name, + }, + Spec: v2alpha1.CiliumVtepPolicySpec{ + Selectors: []v2alpha1.CiliumVtepPolicyRules{ + { + PodSelector: &slimv1.LabelSelector{ + MatchLabels: params.endpointLabels, + }, + }, + }, + DestinationCIDRs: destinationCIDRs, + ExternalVTEP: &v2alpha1.ExternalVTEP{ + IP: params.vtepIP, + MAC: v2alpha1.MAC(params.mac), + }, + }, + TypeMeta: metav1.TypeMeta{}, + } + + if len(params.podSelectors) != 0 { + cvp.Spec.Selectors[0].PodSelector = &slimv1.LabelSelector{ + MatchLabels: params.podSelectors, + } + } + + return cvp, policy +} + +func addEndpoint(tb testing.TB, endpoints fakeResource[*k8sTypes.CiliumEndpoint], ep *k8sTypes.CiliumEndpoint) { + endpoints.process(tb, resource.Event[*k8sTypes.CiliumEndpoint]{ + Kind: resource.Upsert, + Object: ep, + }) +} + +func deleteEndpoint(tb testing.TB, endpoints fakeResource[*k8sTypes.CiliumEndpoint], ep *k8sTypes.CiliumEndpoint) { + endpoints.process(tb, resource.Event[*k8sTypes.CiliumEndpoint]{ + Kind: resource.Delete, + Object: ep, + }) +} diff --git a/pkg/vteppolicy/manager.go b/pkg/vteppolicy/manager.go new file mode 100644 index 0000000000000..134e3bb0e313c --- /dev/null +++ b/pkg/vteppolicy/manager.go @@ -0,0 +1,485 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package vteppolicy + +import ( + "context" + "fmt" + "log/slog" + "net/netip" + "strings" + "sync" + "sync/atomic" + + "github.com/cilium/hive/cell" + "github.com/spf13/pflag" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/util/workqueue" + + "github.com/cilium/cilium/pkg/datapath/linux/config/defines" + "github.com/cilium/cilium/pkg/datapath/tunnel" + "github.com/cilium/cilium/pkg/identity" + identityCache "github.com/cilium/cilium/pkg/identity/cache" + "github.com/cilium/cilium/pkg/k8s/resource" + 
k8sTypes "github.com/cilium/cilium/pkg/k8s/types" + "github.com/cilium/cilium/pkg/labels" + "github.com/cilium/cilium/pkg/lock" + "github.com/cilium/cilium/pkg/logging/logfields" + "github.com/cilium/cilium/pkg/maps/vtep_policy" + "github.com/cilium/cilium/pkg/option" + "github.com/cilium/cilium/pkg/time" + "github.com/cilium/cilium/pkg/trigger" +) + +// Cell provides a [Manager] for consumption with hive. +var Cell = cell.Module( + "vteppolicy", + "Vtep Policy allows to use external VTEPs access pods", + cell.Config(defaultConfig), + cell.Provide(NewVtepPolicyManager), + cell.Provide(newPolicyResource), + cell.Provide(func(dcfg *option.DaemonConfig) tunnel.EnablerOut { + if !dcfg.EnableVTEP { + return tunnel.EnablerOut{} + } + return tunnel.NewEnabler(true) + }), +) + +type Config struct { + // Default amount of time between triggers of vtep policy state + // reconciliations are invoked + VtepPolicyReconciliationTriggerInterval time.Duration +} + +var defaultConfig = Config{ + VtepPolicyReconciliationTriggerInterval: 1 * time.Second, +} + +func (def Config) Flags(flags *pflag.FlagSet) { + flags.Duration("vtep-policy-reconciliation-trigger-interval", def.VtepPolicyReconciliationTriggerInterval, "Time between triggers of vtep policy state reconciliations") +} + +// The vtep policy manager stores the internal data tracking the node, policy, +// endpoint, and lease mappings. It also hooks up all the callbacks to update +// vteppolicy bpf policy map accordingly. +type Manager struct { + logger *slog.Logger + + // reconciliationEventsCount keeps track of how many reconciliation + // events have occoured + reconciliationEventsCount atomic.Uint64 + + // reconciliationTrigger is the trigger used to reconcile the state of + // the node with the desired vtep policy state. + // The trigger is used to batch multiple updates together + reconciliationTrigger *trigger.Trigger + + mu lock.Mutex + + // allCachesSynced is true when all k8s objects we depend on have had + // their initial state synced. + allCachesSynced bool + + // policies allows reading policy CRD from k8s. + policies resource.Resource[*Policy] + + // endpoints allows reading endpoint CRD from k8s. + endpoints resource.Resource[*k8sTypes.CiliumEndpoint] + + // policyConfigs stores policy configs indexed by policyID + policyConfigs map[policyID]*PolicyConfig + + // epDataStore stores endpointId to endpoint metadata mapping + epDataStore map[endpointID]*endpointMetadata + + // identityAllocator is used to fetch identity labels for endpoint updates + identityAllocator identityCache.IdentityAllocator + + // policyMap4 communicates the active IPv4 policies to the datapath. 
+ policyMap *vtep_policy.VtepPolicyMap +} + +type Params struct { + cell.In + + Logger *slog.Logger + + Config Config + DaemonConfig *option.DaemonConfig + IdentityAllocator identityCache.IdentityAllocator + PolicyMap *vtep_policy.VtepPolicyMap + Policies resource.Resource[*Policy] + Endpoints resource.Resource[*k8sTypes.CiliumEndpoint] + + Lifecycle cell.Lifecycle +} + +func NewVtepPolicyManager(p Params) (out struct { + cell.Out + + *Manager + defines.NodeOut +}, err error) { + dcfg := p.DaemonConfig + out.Manager = nil + + if !dcfg.EnableVTEP { + return out, fmt.Errorf("vtep policy requires --%s=\"true\" ", option.EnableVTEP) + } + + out.Manager, err = newVtepPolicyManager(p) + if err != nil { + return out, err + } + + return out, nil +} + +func newVtepPolicyManager(p Params) (*Manager, error) { + manager := &Manager{ + logger: p.Logger, + policyConfigs: make(map[policyID]*PolicyConfig), + epDataStore: make(map[endpointID]*endpointMetadata), + identityAllocator: p.IdentityAllocator, + policies: p.Policies, + policyMap: p.PolicyMap, + endpoints: p.Endpoints, + } + + t, err := trigger.NewTrigger(trigger.Parameters{ + Name: "vtep_policy_reconciliation", + MinInterval: p.Config.VtepPolicyReconciliationTriggerInterval, + TriggerFunc: func(reasons []string) { + reason := strings.Join(reasons, ", ") + manager.logger.Debug("reconciliation triggered", logfields.Reason, reason) + + manager.mu.Lock() + defer manager.mu.Unlock() + + manager.reconcileLocked() + }, + }) + if err != nil { + return nil, err + } + + manager.reconciliationTrigger = t + + var wg sync.WaitGroup + + ctx, cancel := context.WithCancel(context.Background()) + p.Lifecycle.Append(cell.Hook{ + OnStart: func(hc cell.HookContext) error { + wg.Go(func() { + manager.processEvents(ctx) + }) + + return nil + }, + OnStop: func(hc cell.HookContext) error { + cancel() + + wg.Wait() + return nil + }, + }) + + return manager, nil +} + +// getIdentityLabels waits for the global identities to be populated to the cache, +// then looks up identity by ID from the cached identity allocator and return its labels. +func (manager *Manager) getIdentityLabels(securityIdentity uint32) (labels.Labels, error) { + if err := manager.identityAllocator.WaitForInitialGlobalIdentities(context.Background()); err != nil { + return nil, fmt.Errorf("failed to wait for initial global identities: %w", err) + } + + identity := manager.identityAllocator.LookupIdentityByID(context.Background(), identity.NumericIdentity(securityIdentity)) + if identity == nil { + return nil, fmt.Errorf("identity %d not found", securityIdentity) + } + return identity.Labels, nil +} + +// processEvents spawns a goroutine that waits for the agent to +// sync with k8s and then runs the first reconciliation. 
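+// It then keeps handling policy and endpoint events until the given context is cancelled.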
+func (manager *Manager) processEvents(ctx context.Context) { + var policySync, endpointSync bool + maybeTriggerReconcile := func() { + if !policySync || !endpointSync { + return + } + + manager.mu.Lock() + defer manager.mu.Unlock() + + if manager.allCachesSynced { + return + } + + manager.allCachesSynced = true + manager.reconciliationTrigger.TriggerWithReason("k8s sync done") + } + + // here we try to mimic the same exponential backoff retry logic used by + // the identity allocator, where the minimum retry timeout is set to 20 + // milliseconds and the max number of attempts is 16 (so 20ms * 2^16 == + // ~20 minutes) + endpointsRateLimit := workqueue.NewTypedItemExponentialFailureRateLimiter[resource.WorkItem]( + time.Millisecond*20, + time.Minute*20, + ) + + policyEvents := manager.policies.Events(ctx) + endpointEvents := manager.endpoints.Events(ctx, resource.WithRateLimiter(endpointsRateLimit)) + + for { + select { + case <-ctx.Done(): + return + + case event := <-policyEvents: + if event.Kind == resource.Sync { + policySync = true + maybeTriggerReconcile() + event.Done(nil) + } else { + manager.handlePolicyEvent(event) + } + + case event := <-endpointEvents: + if event.Kind == resource.Sync { + endpointSync = true + maybeTriggerReconcile() + event.Done(nil) + } else { + manager.handleEndpointEvent(event) + } + } + } +} + +func (manager *Manager) handlePolicyEvent(event resource.Event[*Policy]) { + switch event.Kind { + case resource.Upsert: + err := manager.onAddVtepPolicy(event.Object) + event.Done(err) + case resource.Delete: + manager.onDeleteVtepPolicy(event.Object) + event.Done(nil) + } +} + +// Event handlers + +// onAddVtepPolicy parses the given policy config, and updates internal state +// with the config fields. +func (manager *Manager) onAddVtepPolicy(policy *Policy) error { + logger := manager.logger.With(logfields.CiliumVtepPolicyName, policy.Name) + + config, err := ParseCVP(policy) + if err != nil { + logger.Warn("Failed to parse CiliumVtepPolicy", logfields.Error, err) + return err + } + + manager.mu.Lock() + defer manager.mu.Unlock() + + if _, ok := manager.policyConfigs[config.id]; !ok { + logger.Debug("Added CiliumVtepPolicy") + } else { + logger.Debug("Updated CiliumVtepPolicy") + } + + config.updateMatchedEndpointIDs(manager.epDataStore) + + manager.policyConfigs[config.id] = config + + manager.reconciliationTrigger.TriggerWithReason("policy added") + return nil +} + +// onDeleteVtepPolicy deletes the internal state associated with the given +// policy, including vteppolicy eBPF map entries. 
+func (manager *Manager) onDeleteVtepPolicy(policy *Policy) { + configID := ParseCVPConfigID(policy) + + manager.mu.Lock() + defer manager.mu.Unlock() + + logger := manager.logger.With(logfields.CiliumVtepPolicyName, configID.Name) + + if manager.policyConfigs[configID] == nil { + manager.logger.Warn("Can't delete CiliumVtepPolicy: policy not found") + } + + logger.Debug("Deleted CiliumVtepPolicy") + + delete(manager.policyConfigs, configID) + + manager.reconciliationTrigger.TriggerWithReason("policy deleted") +} + +func (manager *Manager) addEndpoint(endpoint *k8sTypes.CiliumEndpoint) error { + var epData *endpointMetadata + var err error + var identityLabels labels.Labels + + manager.mu.Lock() + defer manager.mu.Unlock() + + logger := manager.logger.With( + logfields.K8sEndpointName, endpoint.Name, + logfields.K8sNamespace, endpoint.Namespace, + logfields.K8sUID, endpoint.UID, + ) + + if endpoint.Identity == nil { + logger.Warn("Endpoint is missing identity metadata, skipping update to vtep policy.") + return nil + } + + if identityLabels, err = manager.getIdentityLabels(uint32(endpoint.Identity.ID)); err != nil { + logger.Warn("Failed to get identity labels for endpoint", logfields.Error, err) + return err + } + + if epData, err = getEndpointMetadata(endpoint, identityLabels); err != nil { + logger.Error("Failed to get valid endpoint metadata, skipping update to vtep policy.", logfields.Error, err) + return nil + } + + if _, ok := manager.epDataStore[epData.id]; ok { + logger.Debug("Updated CiliumEndpoint") + } else { + logger.Debug("Added CiliumEndpoint") + } + + manager.epDataStore[epData.id] = epData + + manager.reconciliationTrigger.TriggerWithReason("endpoint updated") + + return nil +} + +func (manager *Manager) deleteEndpoint(endpoint *k8sTypes.CiliumEndpoint) { + manager.mu.Lock() + defer manager.mu.Unlock() + + logger := manager.logger.With( + logfields.K8sEndpointName, endpoint.Name, + logfields.K8sNamespace, endpoint.Namespace, + logfields.K8sUID, endpoint.UID, + ) + + logger.Debug("Deleted CiliumEndpoint") + delete(manager.epDataStore, endpoint.UID) + + manager.reconciliationTrigger.TriggerWithReason("endpoint deleted") +} + +func (manager *Manager) handleEndpointEvent(event resource.Event[*k8sTypes.CiliumEndpoint]) { + endpoint := event.Object + + if event.Kind == resource.Upsert { + event.Done(manager.addEndpoint(endpoint)) + } else { + manager.deleteEndpoint(endpoint) + event.Done(nil) + } +} + +func (manager *Manager) updatePoliciesMatchedEndpointIDs() { + for _, policy := range manager.policyConfigs { + policy.updateMatchedEndpointIDs(manager.epDataStore) + } +} + +func (manager *Manager) updateVtepRules() { + if manager.policyMap == nil { + manager.logger.Error("policyMap is nil") + return + } + + vtepPolicies := map[vtep_policy.VtepPolicyKey]vtep_policy.VtepPolicyVal{} + manager.policyMap.IterateWithCallback( + func(key *vtep_policy.VtepPolicyKey, val *vtep_policy.VtepPolicyVal) { + vtepPolicies[*key] = *val + }) + + // Start with the assumption that all the entries currently present in the + // BPF map are stale. Then as we walk the entries below and discover which + // entries are actually still needed, shrink this set down. 
+ stale := sets.KeySet(vtepPolicies) + + addVtepRule := func(endpointIP netip.Addr, dstCIDR netip.Prefix, vtep *vtepConfig) { + if !endpointIP.Is4() { + return + } + + if !dstCIDR.Addr().Is4() { + return + } + + if vtep == nil { + return + } + + policyKey := vtep_policy.NewKey(endpointIP, dstCIDR) + // This key needs to be present in the BPF map, hence remove it from + // the list of stale ones. + stale.Delete(policyKey) + + logger := manager.logger.With( + logfields.SourceIP, endpointIP, + logfields.DestinationCIDR, dstCIDR.String(), + logfields.VtepIP, vtep.vtepIP, + logfields.VtepMAC, vtep.vtepMAC, + ) + + if err := manager.policyMap.UpdateVtepPolicyMapping(endpointIP, dstCIDR, vtep.vtepIP, vtep.vtepMAC); err != nil { + logger.Error("Error applying vtep policy", logfields.Error, err) + } else { + logger.Debug("vtep policy applied") + } + } + + for _, policyConfig := range manager.policyConfigs { + policyConfig.forEachEndpointAndCIDR(addVtepRule) + } + + // Remove all the entries marked as stale. + for policyKey := range stale { + logger := manager.logger.With( + logfields.SourceIP, policyKey.SourceIP, + logfields.DestinationCIDR, policyKey.DestCIDR.String(), + ) + + if err := manager.policyMap.Delete(&policyKey); err != nil { + logger.Error("Error removing vtep gateway policy", logfields.Error, err) + } else { + logger.Debug("Vtep gateway policy removed") + } + } +} + +// reconcileLocked is responsible for reconciling the state of the manager (i.e. the +// desired state) with the actual state of the node (vtep policy map entries). +// +// Whenever it encounters an error, it will just log it and move to the next +// item, in order to reconcile as many states as possible. +func (manager *Manager) reconcileLocked() { + if !manager.allCachesSynced { + return + } + + manager.updatePoliciesMatchedEndpointIDs() + + // Update the content of the BPF maps. 
+ manager.updateVtepRules() + + manager.reconciliationEventsCount.Add(1) +} diff --git a/pkg/vteppolicy/manager_privileged_test.go b/pkg/vteppolicy/manager_privileged_test.go new file mode 100644 index 0000000000000..0966736740b3f --- /dev/null +++ b/pkg/vteppolicy/manager_privileged_test.go @@ -0,0 +1,527 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package vteppolicy + +import ( + "context" + "fmt" + "net/netip" + "testing" + "time" + + "github.com/cilium/ebpf/rlimit" + "github.com/cilium/hive/hivetest" + "github.com/google/uuid" + "github.com/stretchr/testify/require" + "github.com/vishvananda/netlink" + "k8s.io/apimachinery/pkg/types" + + "github.com/cilium/cilium/pkg/bpf" + "github.com/cilium/cilium/pkg/datapath/linux/safenetlink" + "github.com/cilium/cilium/pkg/hive" + "github.com/cilium/cilium/pkg/identity" + cilium_api_v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + "github.com/cilium/cilium/pkg/k8s/resource" + slimv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" + k8sTypes "github.com/cilium/cilium/pkg/k8s/types" + "github.com/cilium/cilium/pkg/labels" + "github.com/cilium/cilium/pkg/mac" + "github.com/cilium/cilium/pkg/maps/vtep" + "github.com/cilium/cilium/pkg/maps/vtep_policy" + "github.com/cilium/cilium/pkg/node/addressing" + nodeTypes "github.com/cilium/cilium/pkg/node/types" + "github.com/cilium/cilium/pkg/option" + "github.com/cilium/cilium/pkg/testutils" + testidentity "github.com/cilium/cilium/pkg/testutils/identity" +) + +const ( + testInterface1 = "cilium_egw1" + testInterface2 = "cilium_egw2" + + vtepIP1 = "1.2.3.4" + mac1 = "00:11:22:33:44:55" + + vtepIP2 = "1.2.3.5" + mac2 = "00:11:22:33:44:56" + + node1 = "k8s1" + node2 = "k8s2" + + node1IP = "192.168.1.1" + node2IP = "192.168.1.2" + + ep1IP = "10.0.0.1" + ep2IP = "10.0.0.2" + ep3IP = "10.0.0.3" + + destCIDR = "1.1.1.0/24" + + egressCIDR1 = "192.168.101.1/24" + egressCIDR2 = "192.168.102.1/24" +) + +var ( + ep1Labels = map[string]string{"test-key": "test-value-1"} + ep2Labels = map[string]string{"test-key": "test-value-2"} + + identityAllocator = testidentity.NewMockIdentityAllocator(nil) + + nodeGroup1Labels = map[string]string{"label1": "1"} + nodeGroup2Labels = map[string]string{"label2": "2"} +) + +type vtepRule struct { + sourceIP string + destCIDR string + vtepIP string + vtepMAC string +} + +type parsedVtepRule struct { + sourceIP netip.Addr + destCIDR netip.Prefix + vtepIP netip.Addr + vtepMAC mac.MAC +} + +type VtepPolicyTestSuite struct { + manager *Manager + policies fakeResource[*Policy] + nodes fakeResource[*cilium_api_v2.CiliumNode] + endpoints fakeResource[*k8sTypes.CiliumEndpoint] +} + +func setupVtepPolicyTestSuite(t *testing.T) *VtepPolicyTestSuite { + testutils.PrivilegedTest(t) + + logger := hivetest.Logger(t) + + bpf.CheckOrMountFS(logger, "") + + if err := vtep.VtepMap(nil).Create(); err != nil { + println(err) + } + + err := rlimit.RemoveMemlock() + require.NoError(t, err) + + nodeTypes.SetName(node1) + + k := &VtepPolicyTestSuite{} + k.policies = make(fakeResource[*Policy]) + k.nodes = make(fakeResource[*cilium_api_v2.CiliumNode]) + k.endpoints = make(fakeResource[*k8sTypes.CiliumEndpoint]) + + lc := hivetest.Lifecycle(t) + policyMap := vtep_policy.CreatePrivatePolicyMap(lc, nil) + + k.manager, err = newVtepPolicyManager(Params{ + Logger: logger, + Lifecycle: lc, + Config: Config{1 * time.Millisecond}, + DaemonConfig: &option.DaemonConfig{}, + IdentityAllocator: identityAllocator, + Policies: k.policies, + Endpoints: k.endpoints, + 
PolicyMap: policyMap, + }) + require.NoError(t, err) + require.NotNil(t, k.manager) + + return k +} + +func TestPrivilegedVtepPolicyCVPParser(t *testing.T) { + setupVtepPolicyTestSuite(t) + // must specify name + policy := policyParams{ + name: "", + destinationCIDRs: []string{destCIDR}, + } + + cvp, _ := newCVP(&policy) + _, err := ParseCVP(cvp) + require.Error(t, err) + + // catch nil DestinationCIDR field + policy = policyParams{ + name: "policy-1", + } + + cvp, _ = newCVP(&policy) + cvp.Spec.DestinationCIDRs = nil + _, err = ParseCVP(cvp) + require.Error(t, err) + + // must specify at least one DestinationCIDR + policy = policyParams{ + name: "policy-1", + } + + cvp, _ = newCVP(&policy) + _, err = ParseCVP(cvp) + require.Error(t, err) + + // catch nil VtepPolicy field + policy = policyParams{ + name: "policy-1", + destinationCIDRs: []string{destCIDR}, + } + + // must specify some sort of endpoint selector + policy = policyParams{ + name: "policy-1", + destinationCIDRs: []string{destCIDR}, + } + + cvp, _ = newCVP(&policy) + cvp.Spec.Selectors[0].NamespaceSelector = nil + cvp.Spec.Selectors[0].PodSelector = nil + _, err = ParseCVP(cvp) + require.Error(t, err) +} + +func TestPrivilegedVtepPolicyManager(t *testing.T) { + k := setupVtepPolicyTestSuite(t) + createTestInterface(t, testInterface1, []string{egressCIDR1}) + createTestInterface(t, testInterface2, []string{egressCIDR2}) + + vtepPolicyManager := k.manager + reconciliationEventsCount := vtepPolicyManager.reconciliationEventsCount.Load() + policyMap := k.manager.policyMap + + k.policies.sync(t) + k.nodes.sync(t) + k.endpoints.sync(t) + + reconciliationEventsCount = waitForReconciliationRun(t, vtepPolicyManager, reconciliationEventsCount) + + node1 := newCiliumNode(node1, node1IP, nodeGroup1Labels) + k.nodes.process(t, resource.Event[*cilium_api_v2.CiliumNode]{ + Kind: resource.Upsert, + Object: node1.ToCiliumNode(), + }) + reconciliationEventsCount = waitForReconciliationRun(t, vtepPolicyManager, reconciliationEventsCount) + + node2 := newCiliumNode(node2, node2IP, nodeGroup2Labels) + k.nodes.process(t, resource.Event[*cilium_api_v2.CiliumNode]{ + Kind: resource.Upsert, + Object: node2.ToCiliumNode(), + }) + reconciliationEventsCount = waitForReconciliationRun(t, vtepPolicyManager, reconciliationEventsCount) + + // Create a new policy + policy1 := policyParams{ + name: "policy-1", + endpointLabels: ep1Labels, + destinationCIDRs: []string{destCIDR}, + podLabels: nodeGroup1Labels, + vtepIP: vtepIP1, + mac: mac1, + } + + addPolicy(t, k.policies, &policy1) + reconciliationEventsCount = waitForReconciliationRun(t, vtepPolicyManager, reconciliationEventsCount) + + assertVtepRules(t, policyMap, []vtepRule{}) + + // Add a new endpoint & ID which matches policy-1 + ep1, id1 := newEndpointAndIdentity("ep-1", ep1IP, "", ep1Labels) + addEndpoint(t, k.endpoints, &ep1) + reconciliationEventsCount = waitForReconciliationRun(t, vtepPolicyManager, reconciliationEventsCount) + + assertVtepRules(t, policyMap, []vtepRule{ + {ep1IP, destCIDR, vtepIP1, mac1}, + }) + + // Update the endpoint labels in order for it to not be a match + id1 = updateEndpointAndIdentity(&ep1, id1, map[string]string{}) + addEndpoint(t, k.endpoints, &ep1) + reconciliationEventsCount = waitForReconciliationRun(t, vtepPolicyManager, reconciliationEventsCount) + + assertVtepRules(t, policyMap, []vtepRule{}) + + // Restore the old endpoint lables in order for it to be a match + id1 = updateEndpointAndIdentity(&ep1, id1, ep1Labels) + addEndpoint(t, k.endpoints, &ep1) + 
reconciliationEventsCount = waitForReconciliationRun(t, vtepPolicyManager, reconciliationEventsCount) + + assertVtepRules(t, policyMap, []vtepRule{ + {ep1IP, destCIDR, vtepIP1, mac1}, + }) + + // Create a new policy + addPolicy(t, k.policies, &policyParams{ + name: "policy-2", + endpointLabels: ep2Labels, + destinationCIDRs: []string{destCIDR}, + podLabels: nodeGroup2Labels, + vtepIP: vtepIP2, + mac: mac2, + }) + reconciliationEventsCount = waitForReconciliationRun(t, vtepPolicyManager, reconciliationEventsCount) + + assertVtepRules(t, policyMap, []vtepRule{ + {ep1IP, destCIDR, vtepIP1, mac1}, + }) + + // Add a new endpoint and ID which matches policy-2 + ep2, _ := newEndpointAndIdentity("ep-2", ep2IP, "", ep2Labels) + addEndpoint(t, k.endpoints, &ep2) + reconciliationEventsCount = waitForReconciliationRun(t, vtepPolicyManager, reconciliationEventsCount) + + assertVtepRules(t, policyMap, []vtepRule{ + {ep1IP, destCIDR, vtepIP1, mac1}, + {ep2IP, destCIDR, vtepIP2, mac2}, + }) + + // Update the endpoint labels in order for it to not be a match + _ = updateEndpointAndIdentity(&ep1, id1, map[string]string{}) + addEndpoint(t, k.endpoints, &ep1) + waitForReconciliationRun(t, vtepPolicyManager, reconciliationEventsCount) + + assertVtepRules(t, policyMap, []vtepRule{ + {ep2IP, destCIDR, vtepIP2, mac2}, + }) +} + +func TestPrivilegedEndpointDataStore(t *testing.T) { + k := setupVtepPolicyTestSuite(t) + + createTestInterface(t, testInterface1, []string{egressCIDR1}) + + vtepPolicyManager := k.manager + policyMap := k.manager.policyMap + + k.policies.sync(t) + k.nodes.sync(t) + k.endpoints.sync(t) + + reconciliationEventsCount := vtepPolicyManager.reconciliationEventsCount.Load() + + node1 := newCiliumNode(node1, node1IP, nodeGroup1Labels) + k.nodes.process(t, resource.Event[*cilium_api_v2.CiliumNode]{ + Kind: resource.Upsert, + Object: node1.ToCiliumNode(), + }) + reconciliationEventsCount = waitForReconciliationRun(t, vtepPolicyManager, reconciliationEventsCount) + + // Create a new policy + policy1 := policyParams{ + name: "policy-1", + endpointLabels: ep1Labels, + destinationCIDRs: []string{destCIDR}, + podLabels: nodeGroup1Labels, + vtepIP: vtepIP1, + mac: mac1, + } + + addPolicy(t, k.policies, &policy1) + reconciliationEventsCount = waitForReconciliationRun(t, vtepPolicyManager, reconciliationEventsCount) + + assertVtepRules(t, policyMap, []vtepRule{}) + + // Add a new endpoint & ID which matches policy-1 + ep1, _ := newEndpointAndIdentity("ep-1", ep1IP, "", ep1Labels) + addEndpoint(t, k.endpoints, &ep1) + reconciliationEventsCount = waitForReconciliationRun(t, vtepPolicyManager, reconciliationEventsCount) + + assertVtepRules(t, policyMap, []vtepRule{ + {ep1IP, destCIDR, vtepIP1, mac1}, + }) + + // Simulate statefulset pod migrations to a different node. + + // Produce a new endpoint ep2 similar to ep1 - with the same name & labels, but with a different IP address. + // The ep1 will be deleted. + ep2, _ := newEndpointAndIdentity(ep1.Name, ep2IP, "", ep1Labels) + + // Test event order: add new -> delete old + addEndpoint(t, k.endpoints, &ep2) + reconciliationEventsCount = waitForReconciliationRun(t, vtepPolicyManager, reconciliationEventsCount) + deleteEndpoint(t, k.endpoints, &ep1) + reconciliationEventsCount = waitForReconciliationRun(t, vtepPolicyManager, reconciliationEventsCount) + + assertVtepRules(t, policyMap, []vtepRule{ + {ep2IP, destCIDR, vtepIP1, mac1}, + }) + + // Produce a new endpoint ep3 similar to ep2 (and ep1) - with the same name & labels, but with a different IP address. 
+ ep3, _ := newEndpointAndIdentity(ep1.Name, ep3IP, "", ep1Labels) + + // Test event order: delete old -> update new + deleteEndpoint(t, k.endpoints, &ep2) + reconciliationEventsCount = waitForReconciliationRun(t, vtepPolicyManager, reconciliationEventsCount) + addEndpoint(t, k.endpoints, &ep3) + waitForReconciliationRun(t, vtepPolicyManager, reconciliationEventsCount) + + assertVtepRules(t, policyMap, []vtepRule{ + {ep3IP, destCIDR, vtepIP1, mac1}, + }) +} + +func TestCell(t *testing.T) { + err := hive.New(Cell).Populate(hivetest.Logger(t)) + if err != nil { + t.Fatal(err) + } +} + +func createTestInterface(tb testing.TB, iface string, addrs []string) { + tb.Helper() + + la := netlink.NewLinkAttrs() + la.Name = iface + dummy := &netlink.Dummy{LinkAttrs: la} + if err := netlink.LinkAdd(dummy); err != nil { + tb.Fatal(err) + } + + link, err := safenetlink.LinkByName(iface) + if err != nil { + tb.Fatal(err) + } + + tb.Cleanup(func() { + if err := netlink.LinkDel(link); err != nil { + tb.Error(err) + } + }) + + if err := netlink.LinkSetUp(link); err != nil { + tb.Fatal(err) + } + + for _, addr := range addrs { + a, _ := netlink.ParseAddr(addr) + if err := netlink.AddrAdd(link, a); err != nil { + tb.Fatal(err) + } + } +} + +func waitForReconciliationRun(tb testing.TB, vtepPolicyManager *Manager, currentRun uint64) uint64 { + for range 100 { + count := vtepPolicyManager.reconciliationEventsCount.Load() + if count > currentRun { + return count + } + + // TODO: investigate why increasing the timeout was necessary to add IPv6 tests. + time.Sleep(30 * time.Millisecond) + } + + tb.Fatal("Reconciliation is taking too long to run") + return 0 +} + +func newCiliumNode(name, nodeIP string, nodeLabels map[string]string) nodeTypes.Node { + return nodeTypes.Node{ + Name: name, + Labels: nodeLabels, + IPAddresses: []nodeTypes.Address{ + { + Type: addressing.NodeInternalIP, + IP: netip.MustParseAddr(nodeIP).AsSlice(), + }, + }, + } +} + +// Mock the creation of endpoint and its corresponding identity, returns endpoint and ID. +func newEndpointAndIdentity(name, ipv4, ipv6 string, epLabels map[string]string) (k8sTypes.CiliumEndpoint, *identity.Identity) { + id, _, _ := identityAllocator.AllocateIdentity(context.Background(), labels.Map2Labels(epLabels, labels.LabelSourceK8s), true, identity.InvalidIdentity) + + return k8sTypes.CiliumEndpoint{ + ObjectMeta: slimv1.ObjectMeta{ + Name: name, + UID: types.UID(uuid.New().String()), + }, + Identity: &cilium_api_v2.EndpointIdentity{ + ID: int64(id.ID), + }, + Networking: &cilium_api_v2.EndpointNetworking{ + Addressing: cilium_api_v2.AddressPairList{ + &cilium_api_v2.AddressPair{ + IPV4: ipv4, + IPV6: ipv6, + }, + }, + }, + }, id +} + +// Mock the update of endpoint and its corresponding identity, with new labels. Returns new ID. 
+func updateEndpointAndIdentity(endpoint *k8sTypes.CiliumEndpoint, oldID *identity.Identity, newEpLabels map[string]string) *identity.Identity { + ctx := context.Background() + + identityAllocator.Release(ctx, oldID, true) + newID, _, _ := identityAllocator.AllocateIdentity(ctx, labels.Map2Labels(newEpLabels, labels.LabelSourceK8s), true, identity.InvalidIdentity) + endpoint.Identity.ID = int64(newID.ID) + return newID +} + +func parseVtepRule(sourceIP, destCIDR, vtepIP, vtepMAC string) parsedVtepRule { + sip := netip.MustParseAddr(sourceIP) + dc := netip.MustParsePrefix(destCIDR) + vip := netip.MustParseAddr(vtepIP) + vmac, _ := mac.ParseMAC(vtepMAC) + + return parsedVtepRule{ + sourceIP: sip, + destCIDR: dc, + vtepIP: vip, + vtepMAC: vmac, + } +} + +func assertVtepRules(t *testing.T, policyMap *vtep_policy.VtepPolicyMap, rules []vtepRule) { + t.Helper() + + err := tryAssertVtepRules(policyMap, rules) + require.NoError(t, err) +} + +func tryAssertVtepRules(policyMap *vtep_policy.VtepPolicyMap, rules []vtepRule) error { + parsedRules := []parsedVtepRule{} + for _, r := range rules { + parsedRules = append(parsedRules, parseVtepRule(r.sourceIP, r.destCIDR, r.vtepIP, r.vtepMAC)) + } + + for _, r := range parsedRules { + key := vtep_policy.NewKey(r.sourceIP, r.destCIDR) + + val, err := policyMap.Lookup(&key) + if err != nil { + return fmt.Errorf("cannot lookup policy entry: %w", err) + } + + if val == nil { + return fmt.Errorf("lookup successful but value is nil") + } + + if !val.Match(r.vtepIP, r.vtepMAC) { + return fmt.Errorf("mismatched val, wanted: %s %s, got: %s", r.vtepIP, r.vtepMAC, val) + } + } + + untrackedRule := false + policyMap.IterateWithCallback( + func(key *vtep_policy.VtepPolicyKey, val *vtep_policy.VtepPolicyVal) { + for _, r := range parsedRules { + if key.Match(r.sourceIP, r.destCIDR) && val.Match(r.vtepIP, r.vtepMAC) { + return + } + } + + untrackedRule = true + }) + + if untrackedRule { + return fmt.Errorf("Untracked vtep policy") + } + + return nil +} diff --git a/pkg/vteppolicy/policy.go b/pkg/vteppolicy/policy.go new file mode 100644 index 0000000000000..571f5c865a19a --- /dev/null +++ b/pkg/vteppolicy/policy.go @@ -0,0 +1,178 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package vteppolicy + +import ( + "fmt" + "net/netip" + + "k8s.io/apimachinery/pkg/types" + + k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io" + "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + k8sLabels "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels" + slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" + "github.com/cilium/cilium/pkg/mac" + "github.com/cilium/cilium/pkg/policy" + "github.com/cilium/cilium/pkg/policy/api" +) + +// vtepConfig is the gateway configuration derived at runtime from a policy. +type vtepConfig struct { + // vtepIP is the IP used for vxlan tunnel + vtepIP netip.Addr + // vtepMAC is the mac address of remote node behing vxlan tunnel + vtepMAC mac.MAC +} + +// PolicyConfig is the internal representation of CiliumVtepPolicy. +type PolicyConfig struct { + // id is the parsed config name and namespace + id types.NamespacedName + + podSelectors []api.EndpointSelector + dstCIDRs []netip.Prefix + + matchedEndpoints map[endpointID]*endpointMetadata + vtepConfig vtepConfig +} + +// PolicyID includes policy name and namespace +type policyID = types.NamespacedName + +// matchesNodeLabels determines if the given node lables is a match for the +// policy config based on matching labels. 
+func (config *PolicyConfig) matchesPodLabels(podLabels map[string]string) bool { + labelsToMatch := k8sLabels.Set(podLabels) + for _, selector := range config.podSelectors { + if selector.Matches(labelsToMatch) { + return true + } + } + return false +} + +// updateMatchedEndpointIDs updates the policy's cache of matched endpoint IDs +func (config *PolicyConfig) updateMatchedEndpointIDs(epDataStore map[endpointID]*endpointMetadata) { + config.matchedEndpoints = make(map[endpointID]*endpointMetadata) + for _, endpoint := range epDataStore { + if config.matchesPodLabels(endpoint.labels) { + config.matchedEndpoints[endpoint.id] = endpoint + } + } +} + +// forEachEndpointAndCIDR iterates through each combination of matched endpoints and +// destination CIDRs of the receiver policy, and for each of them it +// calls the f callback function, passing the endpoint IP and the CIDR together +// with the vtepConfig of the receiver policy +func (config *PolicyConfig) forEachEndpointAndCIDR(f func(netip.Addr, netip.Prefix, *vtepConfig)) { + for _, endpoint := range config.matchedEndpoints { + for _, endpointIP := range endpoint.ips { + for _, dstCIDR := range config.dstCIDRs { + f(endpointIP, dstCIDR, &config.vtepConfig) + } + } + } +} + +// ParseCVP takes a CiliumVtepPolicy CR and converts it to a PolicyConfig, +// the internal representation of the vtep policy +func ParseCVP(cvp *v2alpha1.CiliumVtepPolicy) (*PolicyConfig, error) { + var podSelectorList []api.EndpointSelector + var dstCidrList []netip.Prefix + var vtepIP netip.Addr + + allowAllNamespacesRequirement := slim_metav1.LabelSelectorRequirement{ + Key: k8sConst.PodNamespaceLabel, + Operator: slim_metav1.LabelSelectorOpExists, + } + + name := cvp.ObjectMeta.Name + if name == "" { + return nil, fmt.Errorf("must have a name") + } + + destinationCIDRs := cvp.Spec.DestinationCIDRs + if destinationCIDRs == nil { + return nil, fmt.Errorf("destinationCIDRs can't be empty") + } + + externalVTEP := cvp.Spec.ExternalVTEP + if externalVTEP == nil { + return nil, fmt.Errorf("externalVTEP can't be empty") + } + + vtepIP, err := netip.ParseAddr(externalVTEP.IP) + if err != nil { + return nil, fmt.Errorf("cannot parse vtep ip %s: %w", externalVTEP.IP, err) + } + + vtepMAC, err := mac.ParseMAC(string(externalVTEP.MAC)) + if err != nil { + return nil, fmt.Errorf("cannot parse vtep mac %s: %w", externalVTEP.MAC, err) + } + + for _, cidrString := range destinationCIDRs { + cidr, err := netip.ParsePrefix(string(cidrString)) + if err != nil { + return nil, fmt.Errorf("failed to parse destination CIDR %s: %w", cidrString, err) + } + dstCidrList = append(dstCidrList, cidr) + } + + for _, vtepRule := range cvp.Spec.Selectors { + if vtepRule.NamespaceSelector != nil { + prefixedNsSelector := vtepRule.NamespaceSelector + matchLabels := map[string]string{} + // We use our own special label prefix for namespace metadata, + // thus we need to add that prefix to all NamespaceSelector.MatchLabels keys + for k, v := range vtepRule.NamespaceSelector.MatchLabels { + matchLabels[policy.JoinPath(k8sConst.PodNamespaceMetaLabels, k)] = v + } + + prefixedNsSelector.MatchLabels = matchLabels + + // The same namespace metadata prefix must also be added to all + // NamespaceSelector.MatchExpressions keys + for i, lsr := range vtepRule.NamespaceSelector.MatchExpressions { + lsr.Key = policy.JoinPath(k8sConst.PodNamespaceMetaLabels, lsr.Key) + prefixedNsSelector.MatchExpressions[i] = lsr + } + + // Empty namespace selector 
selects all namespaces (i.e., a namespace + // label exists). + if len(vtepRule.NamespaceSelector.MatchLabels) == 0 && len(vtepRule.NamespaceSelector.MatchExpressions) == 0 { + prefixedNsSelector.MatchExpressions = []slim_metav1.LabelSelectorRequirement{allowAllNamespacesRequirement} + } + } else if vtepRule.PodSelector != nil { + podSelectorList = append( + podSelectorList, + api.NewESFromK8sLabelSelector("", vtepRule.PodSelector)) + } else { + return nil, fmt.Errorf("cannot have both nil namespace selector and nil pod selector") + } + } + + return &PolicyConfig{ + podSelectors: podSelectorList, + dstCIDRs: dstCidrList, + vtepConfig: vtepConfig{ + vtepIP: vtepIP, + vtepMAC: vtepMAC, + }, + matchedEndpoints: make(map[endpointID]*endpointMetadata), + id: types.NamespacedName{ + Name: name, + }, + }, nil +} + +// ParseCEGPConfigID takes a CiliumVtepPolicy CR and returns only the config id +func ParseCVPConfigID(cvp *v2alpha1.CiliumVtepPolicy) types.NamespacedName { + return policyID{ + Name: cvp.Name, + } +} diff --git a/pkg/vteppolicy/policy_test.go b/pkg/vteppolicy/policy_test.go new file mode 100644 index 0000000000000..7998e3c013765 --- /dev/null +++ b/pkg/vteppolicy/policy_test.go @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package vteppolicy + +import ( + "net/netip" + "testing" + + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/types" + + slimv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" + "github.com/cilium/cilium/pkg/policy/api" +) + +func TestPolicyConfig_updateMatchedEndpointIDs(t *testing.T) { + type fields struct { + id types.NamespacedName + endpointSelectors []api.EndpointSelector + podSelectors []api.EndpointSelector + dstCIDRs []netip.Prefix + matchedEndpoints map[endpointID]*endpointMetadata + } + type args struct { + epDataStore map[endpointID]*endpointMetadata + } + tests := []struct { + name string + fields fields + args args + want int + wantEndpointID endpointID + }{ + { + name: "Test updateMatchedEndpointIDs with endpoints", + fields: fields{ + id: types.NamespacedName{ + Name: "test", + }, + endpointSelectors: []api.EndpointSelector{ + { + LabelSelector: &slimv1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "test", + }, + }, + }, + }, + podSelectors: []api.EndpointSelector{ + { + LabelSelector: &slimv1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "test", + }, + }, + }, + }, + }, + args: args{ + epDataStore: map[endpointID]*endpointMetadata{ + "123456": { + id: "123456", + labels: map[string]string{ + "app": "test", + }, + }, + }, + }, + want: 1, + wantEndpointID: endpointID("123456"), + }, + { + name: "Test updateMatchedEndpointIDs endpoints with no match", + fields: fields{ + id: types.NamespacedName{ + Name: "test", + }, + endpointSelectors: []api.EndpointSelector{ + { + LabelSelector: &slimv1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "test", + }, + }, + }, + }, + podSelectors: []api.EndpointSelector{ + { + LabelSelector: &slimv1.LabelSelector{ + MatchLabels: map[string]string{ + "pod-name": "pod1", + }, + }, + }, + }, + }, + args: args{ + epDataStore: map[endpointID]*endpointMetadata{ + "123456": { + id: "123456", + labels: map[string]string{ + "app": "test", + }, + }, + }, + }, + want: 0, + wantEndpointID: "", + }, + { + name: "Test updateMatchedEndpointIDs endpoints with no match label", + fields: fields{ + id: types.NamespacedName{ + Name: "test", + }, + endpointSelectors: []api.EndpointSelector{ + { + LabelSelector: &slimv1.LabelSelector{ + 
MatchLabels: map[string]string{ + "app": "test", + }, + }, + }, + }, + podSelectors: []api.EndpointSelector{ + { + LabelSelector: &slimv1.LabelSelector{ + MatchLabels: map[string]string{ + "pod-name": "pod1", + }, + }, + }, + }, + }, + args: args{ + epDataStore: map[endpointID]*endpointMetadata{ + "123456": { + id: "123456", + labels: map[string]string{ + "app": "test", + }, + }, + }, + }, + want: 0, + wantEndpointID: "", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + config := &PolicyConfig{ + id: tt.fields.id, + podSelectors: tt.fields.podSelectors, + dstCIDRs: tt.fields.dstCIDRs, + matchedEndpoints: tt.fields.matchedEndpoints, + } + config.updateMatchedEndpointIDs(tt.args.epDataStore) + assert.Len(t, config.matchedEndpoints, tt.want) + if tt.want > 0 { + assert.Contains(t, config.matchedEndpoints, endpointID(tt.wantEndpointID)) + } + }) + } +} diff --git a/pkg/vteppolicy/resource.go b/pkg/vteppolicy/resource.go new file mode 100644 index 0000000000000..eb44894e9a697 --- /dev/null +++ b/pkg/vteppolicy/resource.go @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package vteppolicy + +import ( + "github.com/cilium/hive/cell" + + "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + "github.com/cilium/cilium/pkg/k8s/client" + "github.com/cilium/cilium/pkg/k8s/resource" + "github.com/cilium/cilium/pkg/k8s/utils" +) + +type Policy = v2alpha1.CiliumVtepPolicy + +func newPolicyResource(lc cell.Lifecycle, c client.Clientset) resource.Resource[*Policy] { + if !c.IsEnabled() { + return nil + } + lw := utils.ListerWatcherFromTyped(c.CiliumV2alpha1().CiliumVtepPolicies()) + return resource.New[*Policy](lc, lw, nil) +} diff --git a/pkg/xds/experimental/client/cell_test.go b/pkg/xds/experimental/client/cell_test.go index 7a7e64d9daea1..2fcd8343a41e4 100644 --- a/pkg/xds/experimental/client/cell_test.go +++ b/pkg/xds/experimental/client/cell_test.go @@ -7,14 +7,14 @@ import ( "fmt" "testing" - "github.com/cilium/cilium/pkg/hive" - "github.com/cilium/cilium/pkg/node" - "github.com/cilium/hive/cell" "github.com/cilium/hive/hivetest" - discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" core_v1 "k8s.io/api/core/v1" + + cmtypes "github.com/cilium/cilium/pkg/clustermesh/types" + "github.com/cilium/cilium/pkg/hive" + "github.com/cilium/cilium/pkg/node" ) func TestCell_SuccessfullyRunClient(t *testing.T) { @@ -26,6 +26,7 @@ func TestCell_SuccessfullyRunClient(t *testing.T) { cell.Provide(NewInsecureGRPCOptionsProvider), node.LocalNodeStoreTestCell, Cell, + cell.Provide(func() cmtypes.ClusterInfo { return cmtypes.ClusterInfo{} }), cell.Invoke(func(localNodeStore *node.LocalNodeStore) { localNodeStore.Update(func(n *node.LocalNode) { hLog.Info("Update localNodeStore") diff --git a/stable.txt b/stable.txt index 2a34c700cf2d6..4fe71e952da87 100644 --- a/stable.txt +++ b/stable.txt @@ -1 +1 @@ -v1.18.1 +v1.18.2 diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md index c73744e2309f1..1799c6ef223f9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -1,5 +1,16 @@ # Release History +## 1.19.1 (2025-09-11) + +### Bugs Fixed + +* Fixed resource identifier parsing for provider-specific resource hierarchies containing "resourceGroups" segments. 
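The azcore 1.19.1 bullet above corresponds to the `chooseResourceType`/`appendNext` changes further down: a path segment literally named `resourceGroups` is now treated as the ARM resource-group type only when its parent is a subscription, so provider-specific child collections that reuse that name parse as ordinary provider resources. Below is a minimal sketch of the behaviour through the public `arm.ParseResourceID` API; the `Microsoft.Example/widgets` provider hierarchy and the expected output are made up purely for illustration:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
)

func main() {
	// A provider-defined child collection that happens to be called "resourceGroups".
	id := "/subscriptions/0000/resourceGroups/rg1/providers/Microsoft.Example/widgets/w1/resourceGroups/nested"

	rid, err := arm.ParseResourceID(id)
	if err != nil {
		panic(err)
	}

	// With the fix, the trailing segment is classified as a child resource of
	// Microsoft.Example/widgets rather than as the top-level ARM resource group type.
	fmt.Println(rid.ResourceType, rid.Name) // e.g. "Microsoft.Example/widgets/resourceGroups" "nested"
	fmt.Println(rid.ResourceGroupName)      // "rg1"
}
```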
+ +### Other Changes + +* Improved error fall-back for improperly authored long-running operations. +* Upgraded dependencies. + ## 1.19.0 (2025-08-21) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go index a08d3d0ffa682..b8348b7d82e81 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go @@ -123,9 +123,9 @@ func newResourceIDWithProvider(parent *ResourceID, providerNamespace, resourceTy } func chooseResourceType(resourceTypeName string, parent *ResourceID) ResourceType { - if strings.EqualFold(resourceTypeName, resourceGroupsLowerKey) { + if strings.EqualFold(resourceTypeName, resourceGroupsLowerKey) && isSubscriptionResource(parent) { return ResourceGroupResourceType - } else if strings.EqualFold(resourceTypeName, subscriptionsKey) && parent != nil && parent.ResourceType.String() == TenantResourceType.String() { + } else if strings.EqualFold(resourceTypeName, subscriptionsKey) && isTenantResource(parent) { return SubscriptionResourceType } @@ -182,12 +182,12 @@ func appendNext(parent *ResourceID, parts []string, id string) (*ResourceID, err if len(parts) == 1 { // subscriptions and resourceGroups are not valid ids without their names - if strings.EqualFold(parts[0], subscriptionsKey) || strings.EqualFold(parts[0], resourceGroupsLowerKey) { + if strings.EqualFold(parts[0], subscriptionsKey) && isTenantResource(parent) || strings.EqualFold(parts[0], resourceGroupsLowerKey) && isSubscriptionResource(parent) { return nil, fmt.Errorf("invalid resource ID: %s", id) } // resourceGroup must contain either child or provider resource type - if parent.ResourceType.String() == ResourceGroupResourceType.String() { + if isResourceGroupResource(parent) { return nil, fmt.Errorf("invalid resource ID: %s", id) } @@ -196,7 +196,7 @@ func appendNext(parent *ResourceID, parts []string, id string) (*ResourceID, err if strings.EqualFold(parts[0], providersKey) && (len(parts) == 2 || strings.EqualFold(parts[2], providersKey)) { // provider resource can only be on a tenant or a subscription parent - if parent.ResourceType.String() != SubscriptionResourceType.String() && parent.ResourceType.String() != TenantResourceType.String() { + if !isSubscriptionResource(parent) && !isTenantResource(parent) { return nil, fmt.Errorf("invalid resource ID: %s", id) } @@ -225,3 +225,18 @@ func splitStringAndOmitEmpty(v, sep string) []string { return r } + +// isTenantResource returns true if the resourceID represents a tenant resource. The condition is resource ID matched with TenantResourceType and has no parent. +func isTenantResource(resourceID *ResourceID) bool { + return resourceID != nil && strings.EqualFold(resourceID.ResourceType.String(), TenantResourceType.String()) && resourceID.Parent == nil +} + +// isSubscriptionResource returns true if the resourceID represents a subscription resource. The condition is resource ID matched with SubscriptionResourceType and its parent is a tenant resource. 
+func isSubscriptionResource(resourceID *ResourceID) bool { + return resourceID != nil && strings.EqualFold(resourceID.ResourceType.String(), SubscriptionResourceType.String()) && isTenantResource(resourceID.Parent) +} + +// isResourceGroupResource returns true if the resourceID represents a resource group resource. The condition is resource ID matched with ResourceGroupResourceType and its parent is a subscription resource. +func isResourceGroupResource(resourceID *ResourceID) bool { + return resourceID != nil && strings.EqualFold(resourceID.ResourceType.String(), ResourceGroupResourceType.String()) && isSubscriptionResource(resourceID.Parent) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go index 9fb41a405ab38..8aebe5ce53b97 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -40,5 +40,5 @@ const ( Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. - Version = "v1.19.0" + Version = "v1.19.1" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go index 4f90e44743238..a89ae9b7b9d52 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go @@ -91,7 +91,7 @@ func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPol // this is a back-stop in case the swagger is incorrect (i.e. missing one or more status codes for success). // ideally the codegen should return an error if the initial response failed and not even create a poller. if !poller.StatusCodeValid(resp) { - return nil, errors.New("the operation failed or was cancelled") + return nil, exported.NewResponseError(resp) } // determine the polling method diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md index 9e68cf6701528..ab63f9c031b7a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md @@ -1,5 +1,17 @@ # Release History +## 1.12.0 (2025-09-16) + +### Features Added +- Added `DefaultAzureCredentialOptions.RequireAzureTokenCredentials`. `NewDefaultAzureCredential` returns an + error when this option is true and the environment variable `AZURE_TOKEN_CREDENTIALS` has no value. + +### Other Changes +- `AzureDeveloperCLICredential` no longer hangs when AZD_DEBUG is set +- `GetToken` methods of `AzureCLICredential` and `AzureDeveloperCLICredential` return an error when + `TokenRequestOptions.Claims` has a value because these credentials can't acquire a token in that + case. The error messages describe the action required to get a token. 
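The `RequireAzureTokenCredentials` option described in the azidentity 1.12.0 entry above (and added to `DefaultAzureCredentialOptions` later in this diff) turns a missing `AZURE_TOKEN_CREDENTIALS` variable into a constructor error instead of silently building the full default chain. A hedged sketch of how an application might opt in; the environment value and its placement in code are illustrative only:

```go
package main

import (
	"log"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// Normally set by the operator outside the process; per the docs added in this
	// diff, "prod" restricts the chain to EnvironmentCredential,
	// WorkloadIdentityCredential and ManagedIdentityCredential.
	os.Setenv("AZURE_TOKEN_CREDENTIALS", "prod")

	cred, err := azidentity.NewDefaultAzureCredential(&azidentity.DefaultAzureCredentialOptions{
		// Fail at construction time when AZURE_TOKEN_CREDENTIALS has no value.
		RequireAzureTokenCredentials: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = cred // pass to any azcore-based client constructor as usual
}
```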
+ ## 1.11.0 (2025-08-05) ### Other Changes diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md index 6ac513846d9e8..838601d69c80a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md @@ -18,7 +18,6 @@ This troubleshooting guide covers failure investigation techniques, common error - [Troubleshoot EnvironmentCredential authentication issues](#troubleshoot-environmentcredential-authentication-issues) - [Troubleshoot ManagedIdentityCredential authentication issues](#troubleshoot-managedidentitycredential-authentication-issues) - [Azure App Service and Azure Functions managed identity](#azure-app-service-and-azure-functions-managed-identity) - - [Azure Kubernetes Service managed identity](#azure-kubernetes-service-managed-identity) - [Azure Virtual Machine managed identity](#azure-virtual-machine-managed-identity) - [Troubleshoot WorkloadIdentityCredential authentication issues](#troubleshoot-workloadidentitycredential-authentication-issues) - [Get additional help](#get-additional-help) @@ -120,7 +119,6 @@ azlog.SetEvents(azidentity.EventAuthentication) |---|---|---| |Azure Virtual Machines and Scale Sets|[Configuration](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/qs-configure-portal-windows-vm)|[Troubleshooting](#azure-virtual-machine-managed-identity)| |Azure App Service and Azure Functions|[Configuration](https://learn.microsoft.com/azure/app-service/overview-managed-identity)|[Troubleshooting](#azure-app-service-and-azure-functions-managed-identity)| -|Azure Kubernetes Service|[Configuration](https://azure.github.io/aad-pod-identity/docs/)|[Troubleshooting](#azure-kubernetes-service-managed-identity)| |Azure Arc|[Configuration](https://learn.microsoft.com/azure/azure-arc/servers/managed-identity-authentication)|| |Azure Service Fabric|[Configuration](https://learn.microsoft.com/azure/service-fabric/concepts-managed-identity)|| @@ -159,14 +157,6 @@ curl "$IDENTITY_ENDPOINT?resource=https://management.core.windows.net&api-versio > This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security. -### Azure Kubernetes Service managed identity - -#### Pod Identity - -| Error Message |Description| Mitigation | -|---|---|---| -|"no azure identity found for request clientID"|The application attempted to authenticate before an identity was assigned to its pod|Verify the pod is labeled correctly. This also occurs when a correctly labeled pod authenticates before the identity is ready. To prevent initialization races, configure NMI to set the Retry-After header in its responses as described in [Pod Identity documentation](https://azure.github.io/aad-pod-identity/docs/configure/feature_flags/#set-retry-after-header-in-nmi-response). 
- ## Troubleshoot AzureCLICredential authentication issues diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json index 4118f99ef2c91..1646ff9116741 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/azidentity", - "Tag": "go/azidentity_191110b0dd" + "Tag": "go/azidentity_530ea4279b" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go index 0fd03f45634ac..6944152c96e11 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go @@ -7,14 +7,11 @@ package azidentity import ( - "bytes" "context" + "encoding/base64" "encoding/json" "errors" "fmt" - "os" - "os/exec" - "runtime" "strings" "sync" "time" @@ -26,8 +23,6 @@ import ( const credNameAzureCLI = "AzureCLICredential" -type azTokenProvider func(ctx context.Context, scopes []string, tenant, subscription string) ([]byte, error) - // AzureCLICredentialOptions contains optional parameters for AzureCLICredential. type AzureCLICredentialOptions struct { // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to @@ -45,15 +40,8 @@ type AzureCLICredentialOptions struct { // inDefaultChain is true when the credential is part of DefaultAzureCredential inDefaultChain bool - // tokenProvider is used by tests to fake invoking az - tokenProvider azTokenProvider -} - -// init returns an instance of AzureCLICredentialOptions initialized with default values. -func (o *AzureCLICredentialOptions) init() { - if o.tokenProvider == nil { - o.tokenProvider = defaultAzTokenProvider - } + // exec is used by tests to fake invoking az + exec executor } // AzureCLICredential authenticates as the identity logged in to the Azure CLI. @@ -80,7 +68,9 @@ func NewAzureCLICredential(options *AzureCLICredentialOptions) (*AzureCLICredent if cp.TenantID != "" && !validTenantID(cp.TenantID) { return nil, errInvalidTenantID } - cp.init() + if cp.exec == nil { + cp.exec = shellExec + } cp.AdditionallyAllowedTenants = resolveAdditionalTenants(cp.AdditionallyAllowedTenants) return &AzureCLICredential{mu: &sync.Mutex{}, opts: cp}, nil } @@ -99,14 +89,37 @@ func (c *AzureCLICredential) GetToken(ctx context.Context, opts policy.TokenRequ if err != nil { return at, err } + // pass the CLI a Microsoft Entra ID v1 resource because we don't know which CLI version is installed and older ones don't support v2 scopes + resource := strings.TrimSuffix(opts.Scopes[0], defaultSuffix) + command := "az account get-access-token -o json --resource " + resource + tenantArg := "" + if tenant != "" { + tenantArg = " --tenant " + tenant + command += tenantArg + } + if c.opts.Subscription != "" { + // subscription needs quotes because it may contain spaces + command += ` --subscription "` + c.opts.Subscription + `"` + } + if opts.Claims != "" { + encoded := base64.StdEncoding.EncodeToString([]byte(opts.Claims)) + return at, fmt.Errorf( + "%s.GetToken(): Azure CLI requires multifactor authentication or additional claims. 
Run this command then retry the operation: az login%s --claims-challenge %s", + credNameAzureCLI, + tenantArg, + encoded, + ) + } + c.mu.Lock() defer c.mu.Unlock() - b, err := c.opts.tokenProvider(ctx, opts.Scopes, tenant, c.opts.Subscription) + + b, err := c.opts.exec(ctx, credNameAzureCLI, command) if err == nil { at, err = c.createAccessToken(b) } if err != nil { - err = unavailableIfInChain(err, c.opts.inDefaultChain) + err = unavailableIfInDAC(err, c.opts.inDefaultChain) return at, err } msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", credNameAzureCLI, strings.Join(opts.Scopes, ", ")) @@ -114,63 +127,6 @@ func (c *AzureCLICredential) GetToken(ctx context.Context, opts policy.TokenRequ return at, nil } -// defaultAzTokenProvider invokes the Azure CLI to acquire a token. It assumes -// callers have verified that all string arguments are safe to pass to the CLI. -var defaultAzTokenProvider azTokenProvider = func(ctx context.Context, scopes []string, tenantID, subscription string) ([]byte, error) { - // pass the CLI a Microsoft Entra ID v1 resource because we don't know which CLI version is installed and older ones don't support v2 scopes - resource := strings.TrimSuffix(scopes[0], defaultSuffix) - // set a default timeout for this authentication iff the application hasn't done so already - var cancel context.CancelFunc - if _, hasDeadline := ctx.Deadline(); !hasDeadline { - ctx, cancel = context.WithTimeout(ctx, cliTimeout) - defer cancel() - } - commandLine := "az account get-access-token -o json --resource " + resource - if tenantID != "" { - commandLine += " --tenant " + tenantID - } - if subscription != "" { - // subscription needs quotes because it may contain spaces - commandLine += ` --subscription "` + subscription + `"` - } - var cliCmd *exec.Cmd - if runtime.GOOS == "windows" { - dir := os.Getenv("SYSTEMROOT") - if dir == "" { - return nil, newCredentialUnavailableError(credNameAzureCLI, "environment variable 'SYSTEMROOT' has no value") - } - cliCmd = exec.CommandContext(ctx, "cmd.exe", "/c", commandLine) - cliCmd.Dir = dir - } else { - cliCmd = exec.CommandContext(ctx, "/bin/sh", "-c", commandLine) - cliCmd.Dir = "/bin" - } - cliCmd.Env = os.Environ() - var stderr bytes.Buffer - cliCmd.Stderr = &stderr - cliCmd.WaitDelay = 100 * time.Millisecond - - stdout, err := cliCmd.Output() - if errors.Is(err, exec.ErrWaitDelay) && len(stdout) > 0 { - // The child process wrote to stdout and exited without closing it. - // Swallow this error and return stdout because it may contain a token. 
- return stdout, nil - } - if err != nil { - msg := stderr.String() - var exErr *exec.ExitError - if errors.As(err, &exErr) && exErr.ExitCode() == 127 || strings.HasPrefix(msg, "'az' is not recognized") { - msg = "Azure CLI not found on path" - } - if msg == "" { - msg = err.Error() - } - return nil, newCredentialUnavailableError(credNameAzureCLI, msg) - } - - return stdout, nil -} - func (c *AzureCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) { t := struct { AccessToken string `json:"accessToken"` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go index 1bd3720b64977..f97bf95df9b76 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go @@ -7,14 +7,11 @@ package azidentity import ( - "bytes" "context" + "encoding/base64" "encoding/json" "errors" "fmt" - "os" - "os/exec" - "runtime" "strings" "sync" "time" @@ -24,9 +21,10 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/internal/log" ) -const credNameAzureDeveloperCLI = "AzureDeveloperCLICredential" - -type azdTokenProvider func(ctx context.Context, scopes []string, tenant string) ([]byte, error) +const ( + credNameAzureDeveloperCLI = "AzureDeveloperCLICredential" + mfaRequired = "Azure Developer CLI requires multifactor authentication or additional claims" +) // AzureDeveloperCLICredentialOptions contains optional parameters for AzureDeveloperCLICredential. type AzureDeveloperCLICredentialOptions struct { @@ -41,8 +39,8 @@ type AzureDeveloperCLICredentialOptions struct { // inDefaultChain is true when the credential is part of DefaultAzureCredential inDefaultChain bool - // tokenProvider is used by tests to fake invoking azd - tokenProvider azdTokenProvider + // exec is used by tests to fake invoking azd + exec executor } // AzureDeveloperCLICredential authenticates as the identity logged in to the [Azure Developer CLI]. @@ -62,8 +60,8 @@ func NewAzureDeveloperCLICredential(options *AzureDeveloperCLICredentialOptions) if cp.TenantID != "" && !validTenantID(cp.TenantID) { return nil, errInvalidTenantID } - if cp.tokenProvider == nil { - cp.tokenProvider = defaultAzdTokenProvider + if cp.exec == nil { + cp.exec = shellExec } return &AzureDeveloperCLICredential{mu: &sync.Mutex{}, opts: cp}, nil } @@ -75,23 +73,52 @@ func (c *AzureDeveloperCLICredential) GetToken(ctx context.Context, opts policy. 
if len(opts.Scopes) == 0 { return at, errors.New(credNameAzureDeveloperCLI + ": GetToken() requires at least one scope") } + command := "azd auth token -o json --no-prompt" for _, scope := range opts.Scopes { if !validScope(scope) { return at, fmt.Errorf("%s.GetToken(): invalid scope %q", credNameAzureDeveloperCLI, scope) } + command += " --scope " + scope } tenant, err := resolveTenant(c.opts.TenantID, opts.TenantID, credNameAzureDeveloperCLI, c.opts.AdditionallyAllowedTenants) if err != nil { return at, err } + if tenant != "" { + command += " --tenant-id " + tenant + } + commandNoClaims := command + if opts.Claims != "" { + encoded := base64.StdEncoding.EncodeToString([]byte(opts.Claims)) + command += " --claims " + encoded + } + c.mu.Lock() defer c.mu.Unlock() - b, err := c.opts.tokenProvider(ctx, opts.Scopes, tenant) + + b, err := c.opts.exec(ctx, credNameAzureDeveloperCLI, command) if err == nil { at, err = c.createAccessToken(b) } if err != nil { - err = unavailableIfInChain(err, c.opts.inDefaultChain) + msg := err.Error() + switch { + case strings.Contains(msg, "unknown flag: --claims"): + err = newAuthenticationFailedError( + credNameAzureDeveloperCLI, + mfaRequired+", however the installed version doesn't support this. Upgrade to version 1.18.1 or later", + nil, + ) + case opts.Claims != "": + err = newAuthenticationFailedError( + credNameAzureDeveloperCLI, + mfaRequired+". Run this command then retry the operation: "+commandNoClaims, + nil, + ) + case strings.Contains(msg, "azd auth login"): + err = newCredentialUnavailableError(credNameAzureDeveloperCLI, `please run "azd auth login" from a command prompt to authenticate before using this credential`) + } + err = unavailableIfInDAC(err, c.opts.inDefaultChain) return at, err } msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", credNameAzureDeveloperCLI, strings.Join(opts.Scopes, ", ")) @@ -99,61 +126,6 @@ func (c *AzureDeveloperCLICredential) GetToken(ctx context.Context, opts policy. return at, nil } -// defaultAzTokenProvider invokes the Azure Developer CLI to acquire a token. It assumes -// callers have verified that all string arguments are safe to pass to the CLI. -var defaultAzdTokenProvider azdTokenProvider = func(ctx context.Context, scopes []string, tenant string) ([]byte, error) { - // set a default timeout for this authentication iff the application hasn't done so already - var cancel context.CancelFunc - if _, hasDeadline := ctx.Deadline(); !hasDeadline { - ctx, cancel = context.WithTimeout(ctx, cliTimeout) - defer cancel() - } - commandLine := "azd auth token -o json" - if tenant != "" { - commandLine += " --tenant-id " + tenant - } - for _, scope := range scopes { - commandLine += " --scope " + scope - } - var cliCmd *exec.Cmd - if runtime.GOOS == "windows" { - dir := os.Getenv("SYSTEMROOT") - if dir == "" { - return nil, newCredentialUnavailableError(credNameAzureDeveloperCLI, "environment variable 'SYSTEMROOT' has no value") - } - cliCmd = exec.CommandContext(ctx, "cmd.exe", "/c", commandLine) - cliCmd.Dir = dir - } else { - cliCmd = exec.CommandContext(ctx, "/bin/sh", "-c", commandLine) - cliCmd.Dir = "/bin" - } - cliCmd.Env = os.Environ() - var stderr bytes.Buffer - cliCmd.Stderr = &stderr - cliCmd.WaitDelay = 100 * time.Millisecond - - stdout, err := cliCmd.Output() - if errors.Is(err, exec.ErrWaitDelay) && len(stdout) > 0 { - // The child process wrote to stdout and exited without closing it. - // Swallow this error and return stdout because it may contain a token. 
- return stdout, nil - } - if err != nil { - msg := stderr.String() - var exErr *exec.ExitError - if errors.As(err, &exErr) && exErr.ExitCode() == 127 || strings.HasPrefix(msg, "'azd' is not recognized") { - msg = "Azure Developer CLI not found on path" - } else if strings.Contains(msg, "azd auth login") { - msg = `please run "azd auth login" from a command prompt to authenticate before using this credential` - } - if msg == "" { - msg = err.Error() - } - return nil, newCredentialUnavailableError(credNameAzureDeveloperCLI, msg) - } - return stdout, nil -} - func (c *AzureDeveloperCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) { t := struct { AccessToken string `json:"token"` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml index 38445e8536695..51dd9793908b6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml @@ -41,6 +41,3 @@ extends: GenerateVMJobs: true Path: sdk/azidentity/managed-identity-matrix.json Selection: sparse - MatrixReplace: - - Pool=.*LINUXPOOL.*/azsdk-pool-mms-ubuntu-2204-identitymsi - - OSVmImage=.*LINUXVMIMAGE.*/azsdk-pool-mms-ubuntu-2204-1espt diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go index 2b94270a8c6a0..c041a52dbbe1a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go @@ -48,6 +48,10 @@ type DefaultAzureCredentialOptions struct { // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool + // RequireAzureTokenCredentials determines whether NewDefaultAzureCredential returns an error when the environment + // variable AZURE_TOKEN_CREDENTIALS has no value. + RequireAzureTokenCredentials bool + // TenantID sets the default tenant for authentication via the Azure CLI, Azure Developer CLI, and workload identity. TenantID string } @@ -82,6 +86,10 @@ type DefaultAzureCredentialOptions struct { // - "dev": try [AzureCLICredential] and [AzureDeveloperCLICredential], in that order // - "prod": try [EnvironmentCredential], [WorkloadIdentityCredential], and [ManagedIdentityCredential], in that order // +// [DefaultAzureCredentialOptions].RequireAzureTokenCredentials controls whether AZURE_TOKEN_CREDENTIALS must be set. +// NewDefaultAzureCredential returns an error when RequireAzureTokenCredentials is true and AZURE_TOKEN_CREDENTIALS +// has no value. +// // [DefaultAzureCredential overview]: https://aka.ms/azsdk/go/identity/credential-chains#defaultazurecredential-overview type DefaultAzureCredential struct { chain *ChainedTokenCredential @@ -89,6 +97,10 @@ type DefaultAzureCredential struct { // NewDefaultAzureCredential creates a DefaultAzureCredential. Pass nil for options to accept defaults. func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*DefaultAzureCredential, error) { + if options == nil { + options = &DefaultAzureCredentialOptions{} + } + var ( creds []azcore.TokenCredential errorMessages []string @@ -114,11 +126,10 @@ func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*Default default: return nil, fmt.Errorf(`invalid %s value %q. 
Valid values are "dev", "prod", or the name of any credential type in the default chain. See https://aka.ms/azsdk/go/identity/docs#DefaultAzureCredential for more information`, azureTokenCredentials, atc) } + } else if options.RequireAzureTokenCredentials { + return nil, fmt.Errorf("%s must be set when RequireAzureTokenCredentials is true. See https://aka.ms/azsdk/go/identity/docs#DefaultAzureCredential for more information", azureTokenCredentials) } - if options == nil { - options = &DefaultAzureCredentialOptions{} - } additionalTenants := options.AdditionallyAllowedTenants if len(additionalTenants) == 0 { if tenants := os.Getenv(azureAdditionallyAllowedTenants); tenants != "" { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go index be963d3a2af0d..14f8a0312659b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go @@ -7,22 +7,73 @@ package azidentity import ( + "bytes" + "context" "errors" + "os" + "os/exec" + "runtime" + "strings" "time" ) // cliTimeout is the default timeout for authentication attempts via CLI tools const cliTimeout = 10 * time.Second -// unavailableIfInChain returns err or, if the credential was invoked by DefaultAzureCredential, a +// executor runs a command and returns its output or an error +type executor func(ctx context.Context, credName, command string) ([]byte, error) + +var shellExec = func(ctx context.Context, credName, command string) ([]byte, error) { + // set a default timeout for this authentication iff the caller hasn't done so already + var cancel context.CancelFunc + if _, hasDeadline := ctx.Deadline(); !hasDeadline { + ctx, cancel = context.WithTimeout(ctx, cliTimeout) + defer cancel() + } + var cmd *exec.Cmd + if runtime.GOOS == "windows" { + dir := os.Getenv("SYSTEMROOT") + if dir == "" { + return nil, newCredentialUnavailableError(credName, `environment variable "SYSTEMROOT" has no value`) + } + cmd = exec.CommandContext(ctx, "cmd.exe", "/c", command) + cmd.Dir = dir + } else { + cmd = exec.CommandContext(ctx, "/bin/sh", "-c", command) + cmd.Dir = "/bin" + } + cmd.Env = os.Environ() + stderr := bytes.Buffer{} + cmd.Stderr = &stderr + cmd.WaitDelay = 100 * time.Millisecond + + stdout, err := cmd.Output() + if errors.Is(err, exec.ErrWaitDelay) && len(stdout) > 0 { + // The child process wrote to stdout and exited without closing it. + // Swallow this error and return stdout because it may contain a token. + return stdout, nil + } + if err != nil { + msg := stderr.String() + var exErr *exec.ExitError + if errors.As(err, &exErr) && exErr.ExitCode() == 127 || strings.Contains(msg, "' is not recognized") { + return nil, newCredentialUnavailableError(credName, "CLI executable not found on path") + } + if msg == "" { + msg = err.Error() + } + return nil, newAuthenticationFailedError(credName, msg, nil) + } + + return stdout, nil +} + +// unavailableIfInDAC returns err or, if the credential was invoked by DefaultAzureCredential, a // credentialUnavailableError having the same message. This ensures DefaultAzureCredential will try // the next credential in its chain (another developer credential). 
-func unavailableIfInChain(err error, inDefaultChain bool) error { - if err != nil && inDefaultChain { - var unavailableErr credentialUnavailable - if !errors.As(err, &unavailableErr) { - err = newCredentialUnavailableError(credNameAzureDeveloperCLI, err.Error()) - } +func unavailableIfInDAC(err error, inDefaultChain bool) error { + if err != nil && inDefaultChain && !errors.As(err, new(credentialUnavailable)) { + err = NewCredentialUnavailableError(err.Error()) } return err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json index f92245533fcb2..063325c69d672 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json @@ -10,8 +10,7 @@ }, "GoVersion": [ "env:GO_VERSION_PREVIOUS" - ], - "IDENTITY_IMDS_AVAILABLE": "1" + ] } ] -} +} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 index 874d4ef37ddf0..c5634cd21d073 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 @@ -41,7 +41,7 @@ if ($CI) { az account set --subscription $SubscriptionId } -Write-Host "Building container" +Write-Host "##[group]Building container" $image = "$($DeploymentOutputs['AZIDENTITY_ACR_LOGIN_SERVER'])/azidentity-managed-id-test" Set-Content -Path "$PSScriptRoot/Dockerfile" -Value @" FROM mcr.microsoft.com/oss/go/microsoft/golang:latest AS builder @@ -62,11 +62,34 @@ CMD ["./managed-id-test"] docker build -t $image "$PSScriptRoot" az acr login -n $DeploymentOutputs['AZIDENTITY_ACR_NAME'] docker push $image +Write-Host "##[endgroup]" $rg = $DeploymentOutputs['AZIDENTITY_RESOURCE_GROUP'] +Write-Host "##[group]Deploying to VM" +# az will return 0 when the script fails on the VM, so the script prints a UUID to indicate all commands succeeded +$uuid = [guid]::NewGuid().ToString() +$vmScript = @" +az acr login -n $($DeploymentOutputs['AZIDENTITY_ACR_NAME']) && \ +sudo docker run \ +-e AZIDENTITY_STORAGE_NAME=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME']) \ +-e AZIDENTITY_STORAGE_NAME_USER_ASSIGNED=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME_USER_ASSIGNED']) \ +-e AZIDENTITY_USER_ASSIGNED_IDENTITY=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) \ +-e AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID']) \ +-e AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID']) \ +-p 80:8080 -d \ +$image && \ +/usr/bin/echo $uuid +"@ +$output = az vm run-command invoke -g $rg -n $DeploymentOutputs['AZIDENTITY_VM_NAME'] --command-id RunShellScript --scripts "$vmScript" | Out-String +Write-Host $output +if (-not $output.Contains($uuid)) { + throw "couldn't start container on VM" +} +Write-Host "##[endgroup]" + # ACI is easier to provision here than in the bicep file because the image isn't available before now -Write-Host "Deploying Azure Container Instance" +Write-Host "##[group]Deploying Azure Container Instance" $aciName = "azidentity-test" az container create -g $rg -n $aciName --image $image ` --acr-identity $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) ` @@ 
-85,23 +108,27 @@ az container create -g $rg -n $aciName --image $image ` FUNCTIONS_CUSTOMHANDLER_PORT=80 $aciIP = az container show -g $rg -n $aciName --query ipAddress.ip --output tsv Write-Host "##vso[task.setvariable variable=AZIDENTITY_ACI_IP;]$aciIP" +Write-Host "##[endgroup]" # Azure Functions deployment: copy the Windows binary from the Docker image, deploy it in a zip -Write-Host "Deploying to Azure Functions" +Write-Host "##[group]Deploying to Azure Functions" $container = docker create $image docker cp ${container}:managed-id-test.exe "$PSScriptRoot/testdata/managed-id-test/" docker rm -v $container Compress-Archive -Path "$PSScriptRoot/testdata/managed-id-test/*" -DestinationPath func.zip -Force az functionapp deploy -g $rg -n $DeploymentOutputs['AZIDENTITY_FUNCTION_NAME'] --src-path func.zip --type zip +Write-Host "##[endgroup]" -Write-Host "Creating federated identity" +Write-Host "##[group]Creating federated identity" $aksName = $DeploymentOutputs['AZIDENTITY_AKS_NAME'] $idName = $DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_NAME'] $issuer = az aks show -g $rg -n $aksName --query "oidcIssuerProfile.issuerUrl" -otsv $podName = "azidentity-test" $serviceAccountName = "workload-identity-sa" az identity federated-credential create -g $rg --identity-name $idName --issuer $issuer --name $idName --subject system:serviceaccount:default:$serviceAccountName --audiences api://AzureADTokenExchange -Write-Host "Deploying to AKS" +Write-Host "##[endgroup]" + +Write-Host "##[group]Deploying to AKS" az aks get-credentials -g $rg -n $aksName az aks update --attach-acr $DeploymentOutputs['AZIDENTITY_ACR_NAME'] -g $rg -n $aksName Set-Content -Path "$PSScriptRoot/k8s.yaml" -Value @" @@ -138,3 +165,4 @@ spec: "@ kubectl apply -f "$PSScriptRoot/k8s.yaml" Write-Host "##vso[task.setvariable variable=AZIDENTITY_POD_NAME;]$podName" +Write-Host "##[endgroup]" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep index 135feb0178e13..cb3b5f4df42b1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep @@ -19,7 +19,10 @@ param location string = resourceGroup().location // https://learn.microsoft.com/azure/role-based-access-control/built-in-roles var acrPull = subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '7f951dda-4ed3-4680-a7ca-43fe172d538d') -var blobReader = subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '2a2b9908-6ea1-4ae2-8e65-a410df84e7d1') +var blobReader = subscriptionResourceId( + 'Microsoft.Authorization/roleDefinitions', + '2a2b9908-6ea1-4ae2-8e65-a410df84e7d1' +) resource sa 'Microsoft.Storage/storageAccounts@2021-08-01' = if (deployResources) { kind: 'StorageV2' @@ -60,6 +63,16 @@ resource acrPullContainerInstance 'Microsoft.Authorization/roleAssignments@2022- scope: containerRegistry } +resource acrPullVM 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (deployResources) { + name: guid(resourceGroup().id, acrPull, 'vm') + properties: { + principalId: deployResources ? 
vm.identity.principalId : '' + principalType: 'ServicePrincipal' + roleDefinitionId: acrPull + } + scope: containerRegistry +} + resource blobRoleUserAssigned 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (deployResources) { scope: saUserAssigned name: guid(resourceGroup().id, blobReader, usermgdid.id) @@ -80,6 +93,16 @@ resource blobRoleFunc 'Microsoft.Authorization/roleAssignments@2022-04-01' = if scope: sa } +resource blobRoleVM 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (deployResources) { + scope: sa + name: guid(resourceGroup().id, blobReader, 'vm') + properties: { + principalId: deployResources ? vm.identity.principalId : '' + roleDefinitionId: blobReader + principalType: 'ServicePrincipal' + } +} + resource containerRegistry 'Microsoft.ContainerRegistry/registries@2023-01-01-preview' = if (deployResources) { location: location name: uniqueString(resourceGroup().id) @@ -215,6 +238,143 @@ resource aks 'Microsoft.ContainerService/managedClusters@2023-06-01' = if (deplo } } +resource publicIP 'Microsoft.Network/publicIPAddresses@2023-05-01' = if (deployResources) { + name: '${baseName}PublicIP' + location: location + sku: { + name: 'Standard' + } + properties: { + publicIPAllocationMethod: 'Static' + } +} + +resource nsg 'Microsoft.Network/networkSecurityGroups@2024-07-01' = if (deployResources) { + name: '${baseName}NSG' + location: location + properties: { + securityRules: [ + { + name: 'AllowHTTP' + properties: { + description: 'Allow HTTP traffic on port 80' + protocol: 'Tcp' + sourcePortRange: '*' + destinationPortRange: '80' + sourceAddressPrefix: '*' + destinationAddressPrefix: '*' + access: 'Allow' + priority: 1000 + direction: 'Inbound' + } + } + ] + } +} + +resource vnet 'Microsoft.Network/virtualNetworks@2024-07-01' = if (deployResources) { + name: '${baseName}vnet' + location: location + properties: { + addressSpace: { + addressPrefixes: [ + '10.0.0.0/16' + ] + } + subnets: [ + { + name: '${baseName}subnet' + properties: { + addressPrefix: '10.0.0.0/24' + defaultOutboundAccess: false + networkSecurityGroup: { + id: deployResources ? nsg.id : '' + } + } + } + ] + } +} + +resource nic 'Microsoft.Network/networkInterfaces@2024-07-01' = if (deployResources) { + name: '${baseName}NIC' + location: location + properties: { + ipConfigurations: [ + { + name: 'myIPConfig' + properties: { + privateIPAllocationMethod: 'Dynamic' + publicIPAddress: { + id: deployResources ? publicIP.id : '' + } + subnet: { + id: deployResources ? vnet.properties.subnets[0].id : '' + } + } + } + ] + } +} + +resource vm 'Microsoft.Compute/virtualMachines@2024-07-01' = if (deployResources) { + name: '${baseName}vm' + location: location + identity: { + type: 'SystemAssigned, UserAssigned' + userAssignedIdentities: { + '${deployResources ? usermgdid.id: ''}': {} + } + } + properties: { + hardwareProfile: { + vmSize: 'Standard_DS1_v2' + } + osProfile: { + adminUsername: adminUser + computerName: '${baseName}vm' + customData: base64(''' +#cloud-config +package_update: true +packages: + - docker.io +runcmd: + - curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash + - az login --identity --allow-no-subscriptions +''') + linuxConfiguration: { + disablePasswordAuthentication: true + ssh: { + publicKeys: [ + { + path: '/home/${adminUser}/.ssh/authorized_keys' + keyData: sshPubKey + } + ] + } + } + } + networkProfile: { + networkInterfaces: [ + { + id: deployResources ? 
nic.id : '' + } + ] + } + storageProfile: { + imageReference: { + publisher: 'Canonical' + offer: 'ubuntu-24_04-lts' + sku: 'server' + version: 'latest' + } + osDisk: { + createOption: 'FromImage' + } + } + } +} + output AZIDENTITY_ACR_LOGIN_SERVER string = deployResources ? containerRegistry.properties.loginServer : '' output AZIDENTITY_ACR_NAME string = deployResources ? containerRegistry.name : '' output AZIDENTITY_AKS_NAME string = deployResources ? aks.name : '' @@ -226,3 +386,5 @@ output AZIDENTITY_USER_ASSIGNED_IDENTITY string = deployResources ? usermgdid.id output AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID string = deployResources ? usermgdid.properties.clientId : '' output AZIDENTITY_USER_ASSIGNED_IDENTITY_NAME string = deployResources ? usermgdid.name : '' output AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID string = deployResources ? usermgdid.properties.principalId : '' +output AZIDENTITY_VM_NAME string = deployResources ? vm.name : '' +output AZIDENTITY_VM_IP string = deployResources ? publicIP.properties.ipAddress : '' diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go index c3a70c4f2e43e..4c88605366da4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go @@ -14,5 +14,5 @@ const ( module = "github.com/Azure/azure-sdk-for-go/sdk/" + component // Version is the semantic version (see http://semver.org) of this module. - version = "v1.11.0" + version = "v1.12.0" ) diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go index cda678e334267..c6baf2094777f 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go @@ -143,9 +143,10 @@ func (s *Server) handler(w http.ResponseWriter, r *http.Request) { headerErr := q.Get("error") if headerErr != "" { desc := html.EscapeString(q.Get("error_description")) + escapedHeaderErr := html.EscapeString(headerErr) // Note: It is a little weird we handle some errors by not going to the failPage. If they all should, // change this to s.error() and make s.error() write the failPage instead of an error code. - _, _ = w.Write([]byte(fmt.Sprintf(failPage, headerErr, desc))) + _, _ = w.Write([]byte(fmt.Sprintf(failPage, escapedHeaderErr, desc))) s.putResult(Result{Err: fmt.Errorf("%s", desc)}) return diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go index c3c4a96fc3020..3f403746404c4 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go @@ -46,9 +46,11 @@ type jsonCaller interface { JSONCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, body, resp interface{}) error } +// For backward compatibility, accept both old and new China endpoints for a transition period. 
var aadTrustedHostList = map[string]bool{ "login.windows.net": true, // Microsoft Azure Worldwide - Used in validation scenarios where host is not this list - "login.partner.microsoftonline.cn": true, // Microsoft Azure China + "login.partner.microsoftonline.cn": true, // Microsoft Azure China (new) + "login.chinacloudapi.cn": true, // Microsoft Azure China (legacy, backward compatibility) "login.microsoftonline.de": true, // Microsoft Azure Blackforest "login-us.microsoftonline.com": true, // Microsoft Azure US Government - Legacy "login.microsoftonline.us": true, // Microsoft Azure US Government @@ -98,6 +100,41 @@ func (r *TenantDiscoveryResponse) Validate() error { return nil } +// ValidateIssuerMatchesAuthority validates that the issuer in the TenantDiscoveryResponse matches the authority. +// This is used to identity security or configuration issues in authorities and the OIDC endpoint +func (r *TenantDiscoveryResponse) ValidateIssuerMatchesAuthority(authorityURI string, aliases map[string]bool) error { + + if authorityURI == "" { + return errors.New("TenantDiscoveryResponse: empty authorityURI provided for validation") + } + + // Parse the issuer URL + issuerURL, err := url.Parse(r.Issuer) + if err != nil { + return fmt.Errorf("TenantDiscoveryResponse: failed to parse issuer URL: %w", err) + } + + // Even if it doesn't match the authority, issuers from known and trusted hosts are valid + if aliases != nil && aliases[issuerURL.Host] { + return nil + } + + // Parse the authority URL for comparison + authorityURL, err := url.Parse(authorityURI) + if err != nil { + return fmt.Errorf("TenantDiscoveryResponse: failed to parse authority URL: %w", err) + } + + // Check if the scheme and host match (paths can be ignored when validating the issuer) + if issuerURL.Scheme == authorityURL.Scheme && issuerURL.Host == authorityURL.Host { + return nil + } + + // If we get here, validation failed + return fmt.Errorf("TenantDiscoveryResponse: issuer from OIDC discovery '%s' does not match authority '%s' or a known pattern", + r.Issuer, authorityURI) +} + type InstanceDiscoveryMetadata struct { PreferredNetwork string `json:"preferred_network"` PreferredCache string `json:"preferred_cache"` @@ -354,6 +391,8 @@ type Info struct { Tenant string Region string InstanceDiscoveryDisabled bool + // InstanceDiscoveryMetadata stores the metadata from AAD instance discovery + InstanceDiscoveryMetadata []InstanceDiscoveryMetadata } // NewInfoFromAuthorityURI creates an AuthorityInfo instance from the authority URL provided. diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go index 4030ec8d8f1be..d220a99466c1d 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go @@ -21,10 +21,12 @@ import ( type cacheEntry struct { Endpoints authority.Endpoints ValidForDomainsInList map[string]bool + // Aliases stores host aliases from instance discovery for quick lookup + Aliases map[string]bool } func createcacheEntry(endpoints authority.Endpoints) cacheEntry { - return cacheEntry{endpoints, map[string]bool{}} + return cacheEntry{endpoints, map[string]bool{}, map[string]bool{}} } // AuthorityEndpoint retrieves endpoints from an authority for auth and token acquisition. 
@@ -71,10 +73,15 @@ func (m *authorityEndpoint) ResolveEndpoints(ctx context.Context, authorityInfo m.addCachedEndpoints(authorityInfo, userPrincipalName, endpoints) + if err := resp.ValidateIssuerMatchesAuthority(authorityInfo.CanonicalAuthorityURI, + m.cache[authorityInfo.CanonicalAuthorityURI].Aliases); err != nil { + return authority.Endpoints{}, fmt.Errorf("ResolveEndpoints(): %w", err) + } + return endpoints, nil } -// cachedEndpoints returns a the cached endpoints if they exists. If not, we return false. +// cachedEndpoints returns the cached endpoints if they exist. If not, we return false. func (m *authorityEndpoint) cachedEndpoints(authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, bool) { m.mu.Lock() defer m.mu.Unlock() @@ -113,6 +120,13 @@ func (m *authorityEndpoint) addCachedEndpoints(authorityInfo authority.Info, use } } + // Extract aliases from instance discovery metadata and add to cache + for _, metadata := range authorityInfo.InstanceDiscoveryMetadata { + for _, alias := range metadata.Aliases { + updatedCacheEntry.Aliases[alias] = true + } + } + m.cache[authorityInfo.CanonicalAuthorityURI] = updatedCacheEntry } @@ -127,12 +141,14 @@ func (m *authorityEndpoint) openIDConfigurationEndpoint(ctx context.Context, aut if err != nil { return "", err } + authorityInfo.InstanceDiscoveryMetadata = resp.Metadata return resp.TenantDiscoveryEndpoint, nil } else if authorityInfo.Region != "" { resp, err := m.rest.Authority().AADInstanceDiscovery(ctx, authorityInfo) if err != nil { return "", err } + authorityInfo.InstanceDiscoveryMetadata = resp.Metadata return resp.TenantDiscoveryEndpoint, nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go index b72921f87be29..86f7edb16a033 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go @@ -3,4 +3,4 @@ package aws // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.38.3" +const goModuleVersion = "1.39.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go index 6ee3391be2737..3314230fd8ca8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go @@ -135,6 +135,8 @@ const ( UserAgentFeatureCredentialsAwsSdkStore = "y" // n/a (this is used by .NET based sdk) UserAgentFeatureCredentialsHTTP = "z" UserAgentFeatureCredentialsIMDS = "0" + + UserAgentFeatureBearerServiceEnvVars = "3" ) var credentialSourceToFeature = map[aws.CredentialSource]UserAgentFeature{ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md index be76d2ce8e93f..748be162a65c6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md @@ -1,3 +1,11 @@ +# v1.31.8 (2025-09-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.31.7 (2025-09-08) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.31.6 (2025-08-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go index 13bccf2d20f21..2971dd9fad1e8 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go @@ -3,4 +3,4 @@ package config // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.31.6" +const goModuleVersion = "1.31.8" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md index e5c6b5a38b663..48b3b7ca84305 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md @@ -1,3 +1,11 @@ +# v1.18.12 (2025-09-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.11 (2025-09-08) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.18.10 (2025-08-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go index 03dee74957f8d..05c61c64c09bb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go @@ -3,4 +3,4 @@ package credentials // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.18.10" +const goModuleVersion = "1.18.12" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md index db2ef11aee56e..f27565751599e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.18.7 (2025-09-08) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.18.6 (2025-08-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go index d82f70843d230..c6028b32aca77 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go @@ -3,4 +3,4 @@ package imds // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.18.6" +const goModuleVersion = "1.18.7" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md index e0ebf39032d62..553faf5037dc8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.4.7 (2025-09-08) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.4.6 (2025-08-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go index 3479c11c48a81..d0c4a4e33e0d7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go @@ -3,4 +3,4 @@ package configsources // goModuleVersion is the tagged release for this module -const goModuleVersion = 
"1.4.6" +const goModuleVersion = "1.4.7" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md index 7ccb390338ad9..4f0701fd0d78b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md @@ -1,3 +1,7 @@ +# v2.7.7 (2025-09-08) + +* **Dependency Update**: Updated to the latest SDK module versions + # v2.7.6 (2025-08-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go index 2d36cac95a8b3..af8faba143bc0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go @@ -3,4 +3,4 @@ package endpoints // goModuleVersion is the tagged release for this module -const goModuleVersion = "2.7.6" +const goModuleVersion = "2.7.7" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/CHANGELOG.md index 4b0dd74d8d367..a12bf45b117c1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/CHANGELOG.md @@ -1,3 +1,19 @@ +# v1.253.0 (2025-09-18) + +* **Feature**: Allowed AMIs adds support for four new parameters - marketplaceProductCodes, deprecationTimeCondition, creationDateCondition and imageNames + +# v1.252.0 (2025-09-17) + +* **Feature**: Add mac-m4.metal and mac-m4pro.metal instance types. + +# v1.251.2 (2025-09-10) + +* No change notes available for this release. + +# v1.251.1 (2025-09-08) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.251.0 (2025-09-04) * **Feature**: Add m8i, m8i-flex and i8ge instance types. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CopySnapshot.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CopySnapshot.go index 04748f31e0a9d..8fcec9ada6db4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CopySnapshot.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CopySnapshot.go @@ -28,25 +28,22 @@ import ( // // - If the source snapshot is on an Outpost, you can't copy it. // -// When copying snapshots to a Region, copies of encrypted EBS snapshots remain -// encrypted. Copies of unencrypted snapshots remain unencrypted, unless you enable -// encryption for the snapshot copy operation. By default, encrypted snapshot -// copies use the default KMS key; however, you can specify a different KMS key. To -// copy an encrypted snapshot that has been shared from another account, you must -// have permissions for the KMS key used to encrypt the snapshot. +// When copying snapshots to a Region, the encryption outcome for the snapshot +// copy depends on the Amazon EBS encryption by default setting for the destination +// Region, the encryption status of the source snapshot, and the encryption +// parameters you specify in the request. For more information, see [Encryption and snapshot copying]. // -// Snapshots copied to an Outpost are encrypted by default using the default -// encryption key for the Region, or a different key that you specify in the -// request using KmsKeyId. Outposts do not support unencrypted snapshots. 
For more -// information, see [Amazon EBS local snapshots on Outposts]in the Amazon EBS User Guide. +// Snapshots copied to an Outpost must be encrypted. Unencrypted snapshots are not +// supported on Outposts. For more information, [Amazon EBS local snapshots on Outposts]. // // Snapshots copies have an arbitrary source volume ID. Do not use this volume ID // for any purpose. // // For more information, see [Copy an Amazon EBS snapshot] in the Amazon EBS User Guide. // +// [Encryption and snapshot copying]: https://docs.aws.amazon.com/ebs/latest/userguide/ebs-copy-snapshot.html#creating-encrypted-snapshots // [Copy an Amazon EBS snapshot]: https://docs.aws.amazon.com/ebs/latest/userguide/ebs-copy-snapshot.html -// [Amazon EBS local snapshots on Outposts]: https://docs.aws.amazon.com/ebs/latest/userguide/snapshots-outposts.html#ami +// [Amazon EBS local snapshots on Outposts]: https://docs.aws.amazon.com/ebs/latest/userguide/snapshots-outposts.html#considerations func (c *Client) CopySnapshot(ctx context.Context, params *CopySnapshotInput, optFns ...func(*Options)) (*CopySnapshotOutput, error) { if params == nil { params = &CopySnapshotInput{} @@ -111,7 +108,7 @@ type CopySnapshotInput struct { // To encrypt a copy of an unencrypted snapshot if encryption by default is not // enabled, enable encryption using this parameter. Otherwise, omit this parameter. - // Encrypted snapshots are encrypted, even if you omit this parameter and + // Copies of encrypted snapshots are encrypted, even if you omit this parameter and // encryption by default is not enabled. You cannot set this parameter to false. // For more information, see [Amazon EBS encryption]in the Amazon EBS User Guide. // diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateFpgaImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateFpgaImage.go index d5a6a1625660e..567d831d1b6cc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateFpgaImage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateFpgaImage.go @@ -13,8 +13,8 @@ import ( // Creates an Amazon FPGA Image (AFI) from the specified design checkpoint (DCP). // -// The create operation is asynchronous. To verify that the AFI is ready for use, -// check the output logs. +// The create operation is asynchronous. To verify that the AFI was successfully +// created and is ready for use, check the output logs. // // An AFI contains the FPGA bitstream that is ready to download to an FPGA. You // can securely deploy an AFI on multiple FPGA-accelerated instances. For more diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImageReferences.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImageReferences.go index 014f031c33868..8faa0147e6783 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImageReferences.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImageReferences.go @@ -56,7 +56,7 @@ type DescribeImageReferencesInput struct { // // Either IncludeAllResourceTypes or ResourceTypes must be specified. // - // [How AMI reference checks work]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/how-ami-references-works.html + // [How AMI reference checks work]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-ami-references.html#how-ami-references-works IncludeAllResourceTypes *bool // The maximum number of items to return for this request. 
To get the next page diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstances.go index 6a559b9211efd..f63ee3bc70c77 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstances.go @@ -381,13 +381,13 @@ type DescribeInstancesInput struct { // // - platform-details - The platform ( Linux/UNIX | Red Hat BYOL Linux | Red Hat // Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise - // Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL - // Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | - // Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with - // SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL - // Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with - // SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL - // Server Web ). + // Linux with High Availability | Red Hat Enterprise Linux with SQL Server + // Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | + // Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux + // with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | + // SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | + // Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | + // Windows with SQL Server Standard | Windows with SQL Server Web ). // // - private-dns-name - The private IPv4 DNS name of the instance. // diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImageBlockPublicAccess.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImageBlockPublicAccess.go index ffc2e2903f918..fcc6f35444f67 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImageBlockPublicAccess.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImageBlockPublicAccess.go @@ -16,14 +16,9 @@ import ( // from your account. With the restriction removed, you can publicly share your // AMIs in the specified Amazon Web Services Region. // -// The API can take up to 10 minutes to configure this setting. During this time, -// if you run [GetImageBlockPublicAccessState], the response will be block-new-sharing . When the API has completed -// the configuration, the response will be unblocked . -// // For more information, see [Block public access to your AMIs] in the Amazon EC2 User Guide. 
// // [Block public access to your AMIs]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-public-access-to-amis.html -// [GetImageBlockPublicAccessState]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetImageBlockPublicAccessState.html func (c *Client) DisableImageBlockPublicAccess(ctx context.Context, params *DisableImageBlockPublicAccessInput, optFns ...func(*Options)) (*DisableImageBlockPublicAccessOutput, error) { if params == nil { params = &DisableImageBlockPublicAccessInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/deserializers.go index 86dc92f42640b..efb93df8b0b41 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/deserializers.go @@ -79296,6 +79296,59 @@ func awsEc2query_deserializeDocumentCreateVolumePermissionListUnwrapped(v *[]typ *v = sv return nil } +func awsEc2query_deserializeDocumentCreationDateCondition(v **types.CreationDateCondition, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.CreationDateCondition + if *v == nil { + sv = &types.CreationDateCondition{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("maximumDaysSinceCreated", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.MaximumDaysSinceCreated = ptr.Int32(int32(i64)) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsEc2query_deserializeDocumentCreditSpecification(v **types.CreditSpecification, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -80856,6 +80909,59 @@ func awsEc2query_deserializeDocumentDeleteSnapshotReturnCode(v **types.DeleteSna return nil } +func awsEc2query_deserializeDocumentDeprecationTimeCondition(v **types.DeprecationTimeCondition, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.DeprecationTimeCondition + if *v == nil { + sv = &types.DeprecationTimeCondition{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("maximumDaysSinceDeprecated", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.MaximumDaysSinceDeprecated = ptr.Int32(int32(i64)) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsEc2query_deserializeDocumentDeprovisionedAddressSet(v *[]string, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected 
nil of type %T", v) @@ -93572,12 +93678,36 @@ func awsEc2query_deserializeDocumentImageCriterion(v **types.ImageCriterion, dec originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { + case strings.EqualFold("creationDateCondition", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentCreationDateCondition(&sv.CreationDateCondition, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("deprecationTimeCondition", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentDeprecationTimeCondition(&sv.DeprecationTimeCondition, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("imageNameSet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentImageNameList(&sv.ImageNames, nodeDecoder); err != nil { + return err + } + case strings.EqualFold("imageProviderSet", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) if err := awsEc2query_deserializeDocumentImageProviderList(&sv.ImageProviders, nodeDecoder); err != nil { return err } + case strings.EqualFold("marketplaceProductCodeSet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentMarketplaceProductCodeList(&sv.MarketplaceProductCodes, nodeDecoder); err != nil { + return err + } + default: // Do nothing and ignore the unexpected tag element err = decoder.Decoder.Skip() @@ -93887,6 +94017,86 @@ func awsEc2query_deserializeDocumentImageMetadata(v **types.ImageMetadata, decod return nil } +func awsEc2query_deserializeDocumentImageNameList(v *[]string, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []string + if *v == nil { + sv = make([]string, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + decoder = memberDecoder + switch { + case strings.EqualFold("item", t.Name.Local): + var col string + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + col = xtv + } + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentImageNameListUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error { + var sv []string + if *v == nil { + sv = make([]string, 0) + } else { + sv = *v + } + + switch { + default: + var mv string + t := decoder.StartEl + _ = t + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = xtv + } + sv = append(sv, mv) + } + *v = sv + return nil +} func awsEc2query_deserializeDocumentImageProviderList(v *[]string, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -116426,6 +116636,86 @@ func awsEc2query_deserializeDocumentManagedPrefixListSetUnwrapped(v *[]types.Man *v = sv return nil } +func awsEc2query_deserializeDocumentMarketplaceProductCodeList(v *[]string, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []string + 
if *v == nil { + sv = make([]string, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + decoder = memberDecoder + switch { + case strings.EqualFold("item", t.Name.Local): + var col string + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + col = xtv + } + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentMarketplaceProductCodeListUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error { + var sv []string + if *v == nil { + sv = make([]string, 0) + } else { + sv = *v + } + + switch { + default: + var mv string + t := decoder.StartEl + _ = t + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = xtv + } + sv = append(sv, mv) + } + *v = sv + return nil +} func awsEc2query_deserializeDocumentMediaAcceleratorInfo(v **types.MediaAcceleratorInfo, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/endpoints.go index 1743a98bc8ad7..6d8a49eb4ce68 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/endpoints.go @@ -328,7 +328,9 @@ func (r *resolver) ResolveEndpoint( return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) } _UseDualStack := *params.UseDualStack + _ = _UseDualStack _UseFIPS := *params.UseFIPS + _ = _UseFIPS if exprVal := params.Endpoint; exprVal != nil { _Endpoint := *exprVal diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/go_module_metadata.go index 67906b4444436..26045e8180754 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/go_module_metadata.go @@ -3,4 +3,4 @@ package ec2 // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.251.0" +const goModuleVersion = "1.253.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/serializers.go index 116e11217e305..81aa10ebc49da 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/serializers.go @@ -50107,6 +50107,18 @@ func awsEc2query_serializeDocumentCreateVolumePermissionModifications(v *types.C return nil } +func awsEc2query_serializeDocumentCreationDateConditionRequest(v *types.CreationDateConditionRequest, value query.Value) error { + object := value.Object() + _ = object + + if v.MaximumDaysSinceCreated != nil { + objectKey := object.Key("MaximumDaysSinceCreated") + objectKey.Integer(*v.MaximumDaysSinceCreated) + } + + return nil +} + func awsEc2query_serializeDocumentCreditSpecificationRequest(v *types.CreditSpecificationRequest, value query.Value) error { object := value.Object() _ = object @@ -50210,6 +50222,18 @@ func awsEc2query_serializeDocumentDeleteQueuedReservedInstancesIdList(v []string return nil } +func 
awsEc2query_serializeDocumentDeprecationTimeConditionRequest(v *types.DeprecationTimeConditionRequest, value query.Value) error { + object := value.Object() + _ = object + + if v.MaximumDaysSinceDeprecated != nil { + objectKey := object.Key("MaximumDaysSinceDeprecated") + objectKey.Integer(*v.MaximumDaysSinceDeprecated) + } + + return nil +} + func awsEc2query_serializeDocumentDeregisterInstanceTagAttributeRequest(v *types.DeregisterInstanceTagAttributeRequest, value query.Value) error { object := value.Object() _ = object @@ -51411,6 +51435,27 @@ func awsEc2query_serializeDocumentImageCriterionRequest(v *types.ImageCriterionR object := value.Object() _ = object + if v.CreationDateCondition != nil { + objectKey := object.Key("CreationDateCondition") + if err := awsEc2query_serializeDocumentCreationDateConditionRequest(v.CreationDateCondition, objectKey); err != nil { + return err + } + } + + if v.DeprecationTimeCondition != nil { + objectKey := object.Key("DeprecationTimeCondition") + if err := awsEc2query_serializeDocumentDeprecationTimeConditionRequest(v.DeprecationTimeCondition, objectKey); err != nil { + return err + } + } + + if v.ImageNames != nil { + objectKey := object.FlatKey("ImageName") + if err := awsEc2query_serializeDocumentImageNameRequestList(v.ImageNames, objectKey); err != nil { + return err + } + } + if v.ImageProviders != nil { objectKey := object.FlatKey("ImageProvider") if err := awsEc2query_serializeDocumentImageProviderRequestList(v.ImageProviders, objectKey); err != nil { @@ -51418,6 +51463,13 @@ func awsEc2query_serializeDocumentImageCriterionRequest(v *types.ImageCriterionR } } + if v.MarketplaceProductCodes != nil { + objectKey := object.FlatKey("MarketplaceProductCode") + if err := awsEc2query_serializeDocumentMarketplaceProductCodeRequestList(v.MarketplaceProductCodes, objectKey); err != nil { + return err + } + } + return nil } @@ -51516,6 +51568,19 @@ func awsEc2query_serializeDocumentImageIdStringList(v []string, value query.Valu return nil } +func awsEc2query_serializeDocumentImageNameRequestList(v []string, value query.Value) error { + if len(v) == 0 { + return nil + } + array := value.Array("Item") + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + func awsEc2query_serializeDocumentImageProviderRequestList(v []string, value query.Value) error { if len(v) == 0 { return nil @@ -54182,6 +54247,19 @@ func awsEc2query_serializeDocumentMacSystemIntegrityProtectionConfigurationReque return nil } +func awsEc2query_serializeDocumentMarketplaceProductCodeRequestList(v []string, value query.Value) error { + if len(v) == 0 { + return nil + } + array := value.Array("Item") + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + func awsEc2query_serializeDocumentMemoryGiBPerVCpu(v *types.MemoryGiBPerVCpu, value query.Value) error { object := value.Object() _ = object diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/types/enums.go index 0bb8b668efc01..2830243236668 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/types/enums.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/types/enums.go @@ -4564,6 +4564,8 @@ const ( InstanceTypeI8ge48xlarge InstanceType = "i8ge.48xlarge" InstanceTypeI8geMetal24xl InstanceType = "i8ge.metal-24xl" InstanceTypeI8geMetal48xl InstanceType = "i8ge.metal-48xl" + InstanceTypeMacM4Metal InstanceType = "mac-m4.metal" + InstanceTypeMacM4proMetal InstanceType = "mac-m4pro.metal" ) // 
Values returns all known values for InstanceType. Note that this can be @@ -5588,6 +5590,8 @@ func (InstanceType) Values() []InstanceType { "i8ge.48xlarge", "i8ge.metal-24xl", "i8ge.metal-48xl", + "mac-m4.metal", + "mac-m4pro.metal", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/types/types.go index 4a12dac7b7e17..c91d266b65a60 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/types/types.go @@ -3323,6 +3323,28 @@ type CreateVolumePermissionModifications struct { noSmithyDocumentSerde } +// The maximum age for allowed images. +type CreationDateCondition struct { + + // The maximum number of days that have elapsed since the image was created. For + // example, a value of 300 allows images that were created within the last 300 + // days. + MaximumDaysSinceCreated *int32 + + noSmithyDocumentSerde +} + +// The maximum age for allowed images. +type CreationDateConditionRequest struct { + + // The maximum number of days that have elapsed since the image was created. For + // example, a value of 300 allows images that were created within the last 300 + // days. + MaximumDaysSinceCreated *int32 + + noSmithyDocumentSerde +} + // Describes the credit option for CPU usage of a T instance. type CreditSpecification struct { @@ -3606,6 +3628,26 @@ type DeleteSnapshotReturnCode struct { noSmithyDocumentSerde } +// The maximum period since deprecation for allowed images. +type DeprecationTimeCondition struct { + + // The maximum number of days that have elapsed since the image was deprecated. + // When set to 0 , no deprecated images are allowed. + MaximumDaysSinceDeprecated *int32 + + noSmithyDocumentSerde +} + +// The maximum period since deprecation for allowed images. +type DeprecationTimeConditionRequest struct { + + // The maximum number of days that have elapsed since the image was deprecated. + // Set to 0 to exclude all deprecated images. + MaximumDaysSinceDeprecated *int32 + + noSmithyDocumentSerde +} + // Information about the tag keys to deregister for the current Region. You can // either specify individual tag keys or deregister all tag keys in the current // Region. You must specify either IncludeAllTagsOfInstance or InstanceTagKeys in @@ -6996,68 +7038,140 @@ type Image struct { noSmithyDocumentSerde } -// The list of criteria that are evaluated to determine whch AMIs are discoverable -// and usable in the account in the specified Amazon Web Services Region. -// Currently, the only criteria that can be specified are AMI providers. +// The criteria that are evaluated to determine which AMIs are discoverable and +// usable in your account for the specified Amazon Web Services Region. // -// Up to 10 imageCriteria objects can be specified, and up to a total of 200 -// values for all imageProviders . For more information, see [JSON configuration for the Allowed AMIs criteria] in the Amazon EC2 -// User Guide. +// For more information, see [How Allowed AMIs works] in the Amazon EC2 User Guide. // -// [JSON configuration for the Allowed AMIs criteria]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-allowed-amis.html#allowed-amis-json-configuration +// [How Allowed AMIs works]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-allowed-amis.html#how-allowed-amis-works type ImageCriterion struct { - // A list of AMI providers whose AMIs are discoverable and useable in the account. 
- // Up to a total of 200 values can be specified. + // The maximum age for allowed images. + CreationDateCondition *CreationDateCondition + + // The maximum period since deprecation for allowed images. + DeprecationTimeCondition *DeprecationTimeCondition + + // The names of allowed images. Names can include wildcards ( ? and * ). + // + // Length: 1–128 characters. With ? , the minimum is 3 characters. + // + // Valid characters: + // + // - Letters: A–Z, a–z + // + // - Numbers: 0–9 + // + // - Special characters: ( ) [ ] . / - ' @ _ * ? + // + // - Spaces + // + // Maximum: 50 values + ImageNames []string + + // The image providers whose images are allowed. // // Possible values: // - // amazon : Allow AMIs created by Amazon Web Services. + // - amazon : Allow AMIs created by Amazon or verified providers. + // + // - aws-marketplace : Allow AMIs created by verified providers in the Amazon Web + // Services Marketplace. // - // aws-marketplace : Allow AMIs created by verified providers in the Amazon Web - // Services Marketplace. + // - aws-backup-vault : Allow AMIs created by Amazon Web Services Backup. // - // aws-backup-vault : Allow AMIs created by Amazon Web Services Backup. + // - 12-digit account ID: Allow AMIs created by this account. One or more + // account IDs can be specified. // - // 12-digit account ID: Allow AMIs created by this account. One or more account - // IDs can be specified. + // - none : Allow AMIs created by your own account only. // - // none : Allow AMIs created by your own account only. + // Maximum: 200 values ImageProviders []string + // The Amazon Web Services Marketplace product codes for allowed images. + // + // Length: 1-25 characters + // + // Valid characters: Letters ( A–Z, a–z ) and numbers ( 0–9 ) + // + // Maximum: 50 values + MarketplaceProductCodes []string + noSmithyDocumentSerde } -// The list of criteria that are evaluated to determine whch AMIs are discoverable -// and usable in the account in the specified Amazon Web Services Region. -// Currently, the only criteria that can be specified are AMI providers. +// The criteria that are evaluated to determine which AMIs are discoverable and +// usable in your account for the specified Amazon Web Services Region. // -// Up to 10 imageCriteria objects can be specified, and up to a total of 200 -// values for all imageProviders . For more information, see [JSON configuration for the Allowed AMIs criteria] in the Amazon EC2 -// User Guide. +// The ImageCriteria can include up to: +// +// - 10 ImageCriterion // -// [JSON configuration for the Allowed AMIs criteria]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-allowed-amis.html#allowed-amis-json-configuration +// Each ImageCriterion can include up to: +// +// - 200 values for ImageProviders +// +// - 50 values for ImageNames +// +// - 50 values for MarketplaceProductCodes +// +// For more information, see [How Allowed AMIs works] in the Amazon EC2 User Guide. +// +// [How Allowed AMIs works]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-allowed-amis.html#how-allowed-amis-works type ImageCriterionRequest struct { - // A list of image providers whose AMIs are discoverable and useable in the - // account. Up to a total of 200 values can be specified. + // The maximum age for allowed images. + CreationDateCondition *CreationDateConditionRequest + + // The maximum period since deprecation for allowed images. + DeprecationTimeCondition *DeprecationTimeConditionRequest + + // The names of allowed images. 
Names can include wildcards ( ? and * ). + // + // Length: 1–128 characters. With ? , the minimum is 3 characters. + // + // Valid characters: + // + // - Letters: A–Z, a–z + // + // - Numbers: 0–9 + // + // - Special characters: ( ) [ ] . / - ' @ _ * ? + // + // - Spaces + // + // Maximum: 50 values + ImageNames []string + + // The image providers whose images are allowed. // // Possible values: // - // amazon : Allow AMIs created by Amazon Web Services. + // - amazon : Allow AMIs created by Amazon or verified providers. + // + // - aws-marketplace : Allow AMIs created by verified providers in the Amazon Web + // Services Marketplace. // - // aws-marketplace : Allow AMIs created by verified providers in the Amazon Web - // Services Marketplace. + // - aws-backup-vault : Allow AMIs created by Amazon Web Services Backup. // - // aws-backup-vault : Allow AMIs created by Amazon Web Services Backup. + // - 12-digit account ID: Allow AMIs created by the specified accounts. One or + // more account IDs can be specified. // - // 12-digit account ID: Allow AMIs created by this account. One or more account - // IDs can be specified. + // - none : Allow AMIs created by your own account only. When none is specified, + // no other values can be specified. // - // none : Allow AMIs created by your own account only. When none is specified, no - // other values can be specified. + // Maximum: 200 values ImageProviders []string + // The Amazon Web Services Marketplace product codes for allowed images. + // + // Length: 1-25 characters + // + // Valid characters: Letters ( A–Z, a–z ) and numbers ( 0–9 ) + // + // Maximum: 50 values + MarketplaceProductCodes []string + noSmithyDocumentSerde } @@ -17060,6 +17174,8 @@ type Route struct { // - CreateRoute - The route was manually added to the route table. // // - EnableVgwRoutePropagation - The route was propagated by route propagation. + // + // - Advertisement - The route was created dynamically by Amazon VPC Route Server. Origin RouteOrigin // The state of the route. 
The blackhole state indicates that the route's target diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md index cb68986ec7ac4..ba1764f378c39 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.13.7 (2025-09-08) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.13.6 (2025-08-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go index 88ba2f3928ae7..e65ba0d51bfde 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go @@ -3,4 +3,4 @@ package presignedurl // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.13.6" +const goModuleVersion = "1.13.7" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md index 7c3eed8d38b11..152c4089cd07b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md @@ -1,3 +1,11 @@ +# v1.29.3 (2025-09-10) + +* No change notes available for this release. + +# v1.29.2 (2025-09-08) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.29.1 (2025-08-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go index 82bd25b164267..2b22ab779c2aa 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go @@ -328,7 +328,9 @@ func (r *resolver) ResolveEndpoint( return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) } _UseDualStack := *params.UseDualStack + _ = _UseDualStack _UseFIPS := *params.UseFIPS + _ = _UseFIPS if exprVal := params.Endpoint; exprVal != nil { _Endpoint := *exprVal diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go index ec53123102982..8655c1c7ba710 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go @@ -3,4 +3,4 @@ package sso // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.29.1" +const goModuleVersion = "1.29.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md index 5b6aed74f3fd3..c66053d05bf7f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md @@ -1,3 +1,11 @@ +# v1.34.4 (2025-09-10) + +* No change notes available for this release. 
+ +# v1.34.3 (2025-09-08) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.34.2 (2025-08-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go index 6feea0c9fec47..1e001f7a9e804 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go @@ -328,7 +328,9 @@ func (r *resolver) ResolveEndpoint( return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) } _UseDualStack := *params.UseDualStack + _ = _UseDualStack _UseFIPS := *params.UseFIPS + _ = _UseFIPS if exprVal := params.Endpoint; exprVal != nil { _Endpoint := *exprVal diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go index 3b20ebb481105..64cd520297f14 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go @@ -3,4 +3,4 @@ package ssooidc // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.34.2" +const goModuleVersion = "1.34.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md index a6ec400a2ce64..b25d184e19e65 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md @@ -1,3 +1,11 @@ +# v1.38.4 (2025-09-10) + +* No change notes available for this release. + +# v1.38.3 (2025-09-08) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.38.2 (2025-08-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go index dca2ce3599e46..945682e1a50e1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go @@ -346,8 +346,11 @@ func (r *resolver) ResolveEndpoint( return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) } _UseDualStack := *params.UseDualStack + _ = _UseDualStack _UseFIPS := *params.UseFIPS + _ = _UseFIPS _UseGlobalEndpoint := *params.UseGlobalEndpoint + _ = _UseGlobalEndpoint if _UseGlobalEndpoint == true { if !(params.Endpoint != nil) { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go index 9700338658533..bf368537e2833 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go @@ -3,4 +3,4 @@ package sts // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.38.2" +const goModuleVersion = "1.38.4" diff --git a/vendor/github.com/cilium/charts/README.md b/vendor/github.com/cilium/charts/README.md index b8ef4ba7520e8..17fbb404bd283 100644 --- a/vendor/github.com/cilium/charts/README.md +++ b/vendor/github.com/cilium/charts/README.md @@ -1,5 +1,6 @@ This repository holds helm templates for the following Cilium releases: +* [v1.19.0-pre.0](https://github.com/cilium/cilium/releases/tag/v1.19.0-pre.0) 
(_[source](https://github.com/cilium/cilium/tree/v1.19.0-pre.0/install/kubernetes/cilium)_) * [v1.18.1](https://github.com/cilium/cilium/releases/tag/v1.18.1) (_[source](https://github.com/cilium/cilium/tree/v1.18.1/install/kubernetes/cilium)_) * [v1.18.0](https://github.com/cilium/cilium/releases/tag/v1.18.0) (_[source](https://github.com/cilium/cilium/tree/v1.18.0/install/kubernetes/cilium)_) * [v1.18.0-rc.1](https://github.com/cilium/cilium/releases/tag/v1.18.0-rc.1) (_[source](https://github.com/cilium/cilium/tree/v1.18.0-rc.1/install/kubernetes/cilium)_) @@ -23,6 +24,7 @@ This repository holds helm templates for the following Cilium releases: * [v1.17.0-pre.2](https://github.com/cilium/cilium/releases/tag/v1.17.0-pre.2) (_[source](https://github.com/cilium/cilium/tree/v1.17.0-pre.2/install/kubernetes/cilium)_) * [v1.17.0-pre.1](https://github.com/cilium/cilium/releases/tag/v1.17.0-pre.1) (_[source](https://github.com/cilium/cilium/tree/v1.17.0-pre.1/install/kubernetes/cilium)_) * [v1.17.0-pre.0](https://github.com/cilium/cilium/releases/tag/v1.17.0-pre.0) (_[source](https://github.com/cilium/cilium/tree/v1.17.0-pre.0/install/kubernetes/cilium)_) +* [v1.16.13](https://github.com/cilium/cilium/releases/tag/v1.16.13) (_[source](https://github.com/cilium/cilium/tree/v1.16.13/install/kubernetes/cilium)_) * [v1.16.12](https://github.com/cilium/cilium/releases/tag/v1.16.12) (_[source](https://github.com/cilium/cilium/tree/v1.16.12/install/kubernetes/cilium)_) * [v1.16.11](https://github.com/cilium/cilium/releases/tag/v1.16.11) (_[source](https://github.com/cilium/cilium/tree/v1.16.11/install/kubernetes/cilium)_) * [v1.16.10](https://github.com/cilium/cilium/releases/tag/v1.16.10) (_[source](https://github.com/cilium/cilium/tree/v1.16.10/install/kubernetes/cilium)_) diff --git a/vendor/github.com/cilium/charts/cilium-1.16.13.tgz b/vendor/github.com/cilium/charts/cilium-1.16.13.tgz new file mode 100644 index 0000000000000..696391adc23b1 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.16.13.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.19.0-pre.0.tgz b/vendor/github.com/cilium/charts/cilium-1.19.0-pre.0.tgz new file mode 100644 index 0000000000000..669fa5c8cc397 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.19.0-pre.0.tgz differ diff --git a/vendor/github.com/cilium/charts/index.yaml b/vendor/github.com/cilium/charts/index.yaml index a511b4950deaa..b7f24e8766f51 100644 --- a/vendor/github.com/cilium/charts/index.yaml +++ b/vendor/github.com/cilium/charts/index.yaml @@ -1,6 +1,111 @@ apiVersion: v1 entries: cilium: + - annotations: + artifacthub.io/crds: "- kind: CiliumNetworkPolicy\n version: v2\n name: ciliumnetworkpolicies.cilium.io\n + \ displayName: Cilium Network Policy\n description: |\n Cilium Network + Policies provide additional functionality beyond what\n is provided by + standard Kubernetes NetworkPolicy such as the ability\n to allow traffic + based on FQDNs, or to filter at Layer 7.\n- kind: CiliumClusterwideNetworkPolicy\n + \ version: v2\n name: ciliumclusterwidenetworkpolicies.cilium.io\n displayName: + Cilium Clusterwide Network Policy\n description: |\n Cilium Clusterwide + Network Policies support configuring network traffic\n policiies across + the entire cluster, including applying node firewalls.\n- kind: CiliumLocalRedirectPolicy\n + \ version: v2\n name: ciliumlocalredirectpolicies.cilium.io\n displayName: + Cilium Local Redirect Policy\n description: |\n Cilium Local Redirect + Policy allows local 
redirects to be configured\n within a node to support + use cases like Node-Local DNS or KIAM.\n- kind: CiliumNode\n version: v2\n + \ name: ciliumnodes.cilium.io\n displayName: Cilium Node\n description: + |\n Cilium Node represents a node managed by Cilium. It contains a\n specification + to control various node specific configuration aspects\n and a status section + to represent the status of the node.\n- kind: CiliumIdentity\n version: v2\n + \ name: ciliumidentities.cilium.io\n displayName: Cilium Identity\n description: + |\n Cilium Identity allows introspection into security identities that\n + \ Cilium allocates which identify sets of labels that are assigned to\n + \ individual endpoints in the cluster.\n- kind: CiliumEndpoint\n version: + v2\n name: ciliumendpoints.cilium.io\n displayName: Cilium Endpoint\n description: + |\n Cilium Endpoint represents the status of individual pods or nodes in\n + \ the cluster which are managed by Cilium, including enforcement status,\n + \ IP addressing and whether the networking is successfully operational.\n- + kind: CiliumEndpointSlice\n version: v2alpha1\n name: ciliumendpointslices.cilium.io\n + \ displayName: Cilium Endpoint Slice\n description: |\n Cilium Endpoint + Slice represents the status of groups of pods or nodes\n in the cluster + which are managed by Cilium, including enforcement status,\n IP addressing + and whether the networking is successfully operational.\n- kind: CiliumEgressGatewayPolicy\n + \ version: v2\n name: ciliumegressgatewaypolicies.cilium.io\n displayName: + Cilium Egress Gateway Policy\n description: |\n Cilium Egress Gateway + Policy provides control over the way that traffic\n leaves the cluster + and which source addresses to use for that traffic.\n- kind: CiliumClusterwideEnvoyConfig\n + \ version: v2\n name: ciliumclusterwideenvoyconfigs.cilium.io\n displayName: + Cilium Clusterwide Envoy Config\n description: |\n Cilium Clusterwide + Envoy Config specifies Envoy resources and K8s service mappings\n to be + provisioned into Cilium host proxy instances in cluster context.\n- kind: + CiliumEnvoyConfig\n version: v2\n name: ciliumenvoyconfigs.cilium.io\n displayName: + Cilium Envoy Config\n description: |\n Cilium Envoy Config specifies Envoy + resources and K8s service mappings\n to be provisioned into Cilium host + proxy instances in namespace context.\n- kind: CiliumNodeConfig\n version: + v2\n name: ciliumnodeconfigs.cilium.io\n displayName: Cilium Node Configuration\n + \ description: |\n CiliumNodeConfig is a list of configuration key-value + pairs. It is applied to\n nodes indicated by a label selector.\n- kind: + CiliumBGPPeeringPolicy\n version: v2alpha1\n name: ciliumbgppeeringpolicies.cilium.io\n + \ displayName: Cilium BGP Peering Policy\n description: |\n Cilium BGP + Peering Policy instructs Cilium to create specific BGP peering\n configurations.\n- + kind: CiliumBGPClusterConfig\n version: v2alpha1\n name: ciliumbgpclusterconfigs.cilium.io\n + \ displayName: Cilium BGP Cluster Config\n description: |\n Cilium BGP + Cluster Config instructs Cilium operator to create specific BGP cluster\n + \ configurations.\n- kind: CiliumBGPPeerConfig\n version: v2alpha1\n name: + ciliumbgppeerconfigs.cilium.io\n displayName: Cilium BGP Peer Config\n description: + |\n CiliumBGPPeerConfig is a common set of BGP peer configurations. 
It + can be referenced \n by multiple peers from CiliumBGPClusterConfig.\n- + kind: CiliumBGPAdvertisement\n version: v2alpha1\n name: ciliumbgpadvertisements.cilium.io\n + \ displayName: Cilium BGP Advertisement\n description: |\n CiliumBGPAdvertisement + is used to define source of BGP advertisement as well as BGP attributes \n + \ to be advertised with those prefixes.\n- kind: CiliumBGPNodeConfig\n version: + v2alpha1\n name: ciliumbgpnodeconfigs.cilium.io\n displayName: Cilium BGP + Node Config\n description: |\n CiliumBGPNodeConfig is read only node specific + BGP configuration. It is constructed by Cilium operator.\n It will also + contain node local BGP state information.\n- kind: CiliumBGPNodeConfigOverride\n + \ version: v2alpha1\n name: ciliumbgpnodeconfigoverrides.cilium.io\n displayName: + Cilium BGP Node Config Override\n description: |\n CiliumBGPNodeConfigOverride + can be used to override node specific BGP configuration.\n- kind: CiliumLoadBalancerIPPool\n + \ version: v2\n name: ciliumloadbalancerippools.cilium.io\n displayName: + Cilium Load Balancer IP Pool\n description: |\n Defining a Cilium Load + Balancer IP Pool instructs Cilium to assign IPs to LoadBalancer Services.\n- + kind: CiliumCIDRGroup\n version: v2alpha1\n name: ciliumcidrgroups.cilium.io\n + \ displayName: Cilium CIDR Group\n description: |\n CiliumCIDRGroup is + a list of CIDRs that can be referenced as a single entity from CiliumNetworkPolicies.\n- + kind: CiliumL2AnnouncementPolicy\n version: v2alpha1\n name: ciliuml2announcementpolicies.cilium.io\n + \ displayName: Cilium L2 Announcement Policy\n description: |\n CiliumL2AnnouncementPolicy + is a policy which determines which service IPs will be announced to\n the + local area network, by which nodes, and via which interfaces.\n- kind: CiliumPodIPPool\n + \ version: v2alpha1\n name: ciliumpodippools.cilium.io\n displayName: Cilium + Pod IP Pool\n description: |\n CiliumPodIPPool defines an IP pool that + can be used for pooled IPAM (i.e. 
the multi-pool IPAM mode).\n- kind: CiliumGatewayClassConfig\n + \ version: v2alpha1\n name: ciliumgatewayclassconfigs.cilium.io\n displayName: + Cilium Gateway Class Config\n description: |\n CiliumGatewayClassConfig + defines a configuration for Gateway API GatewayClass.\n" + apiVersion: v2 + appVersion: 1.19.0-pre.0 + created: "2025-09-03T14:03:43.90553266Z" + description: eBPF-based Networking, Security, and Observability + digest: 70ee18b777431140adecd48afc82c9f9dd905257a259bd1daa2026e1ffc06acb + home: https://cilium.io/ + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@main/Documentation/images/logo-solo.svg + keywords: + - BPF + - eBPF + - Kubernetes + - Networking + - Security + - Observability + - Troubleshooting + kubeVersion: '>= 1.21.0-0' + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.19.0-pre.0.tgz + version: 1.19.0-pre.0 - annotations: artifacthub.io/crds: "- kind: CiliumNetworkPolicy\n version: v2\n name: ciliumnetworkpolicies.cilium.io\n \ displayName: Cilium Network Policy\n description: |\n Cilium Network @@ -2424,6 +2529,112 @@ entries: urls: - cilium-1.17.0-pre.0.tgz version: 1.17.0-pre.0 + - annotations: + artifacthub.io/crds: "- kind: CiliumNetworkPolicy\n version: v2\n name: ciliumnetworkpolicies.cilium.io\n + \ displayName: Cilium Network Policy\n description: |\n Cilium Network + Policies provide additional functionality beyond what\n is provided by + standard Kubernetes NetworkPolicy such as the ability\n to allow traffic + based on FQDNs, or to filter at Layer 7.\n- kind: CiliumClusterwideNetworkPolicy\n + \ version: v2\n name: ciliumclusterwidenetworkpolicies.cilium.io\n displayName: + Cilium Clusterwide Network Policy\n description: |\n Cilium Clusterwide + Network Policies support configuring network traffic\n policiies across + the entire cluster, including applying node firewalls.\n- kind: CiliumExternalWorkload\n + \ version: v2\n name: ciliumexternalworkloads.cilium.io\n displayName: Cilium + External Workload\n description: |\n Cilium External Workload supports + configuring the ability for external\n non-Kubernetes workloads to join + the cluster.\n- kind: CiliumLocalRedirectPolicy\n version: v2\n name: ciliumlocalredirectpolicies.cilium.io\n + \ displayName: Cilium Local Redirect Policy\n description: |\n Cilium + Local Redirect Policy allows local redirects to be configured\n within + a node to support use cases like Node-Local DNS or KIAM.\n- kind: CiliumNode\n + \ version: v2\n name: ciliumnodes.cilium.io\n displayName: Cilium Node\n + \ description: |\n Cilium Node represents a node managed by Cilium. 
It + contains a\n specification to control various node specific configuration + aspects\n and a status section to represent the status of the node.\n- + kind: CiliumIdentity\n version: v2\n name: ciliumidentities.cilium.io\n + \ displayName: Cilium Identity\n description: |\n Cilium Identity allows + introspection into security identities that\n Cilium allocates which identify + sets of labels that are assigned to\n individual endpoints in the cluster.\n- + kind: CiliumEndpoint\n version: v2\n name: ciliumendpoints.cilium.io\n displayName: + Cilium Endpoint\n description: |\n Cilium Endpoint represents the status + of individual pods or nodes in\n the cluster which are managed by Cilium, + including enforcement status,\n IP addressing and whether the networking + is successfully operational.\n- kind: CiliumEndpointSlice\n version: v2alpha1\n + \ name: ciliumendpointslices.cilium.io\n displayName: Cilium Endpoint Slice\n + \ description: |\n Cilium Endpoint Slice represents the status of groups + of pods or nodes\n in the cluster which are managed by Cilium, including + enforcement status,\n IP addressing and whether the networking is successfully + operational.\n- kind: CiliumEgressGatewayPolicy\n version: v2\n name: ciliumegressgatewaypolicies.cilium.io\n + \ displayName: Cilium Egress Gateway Policy\n description: |\n Cilium + Egress Gateway Policy provides control over the way that traffic\n leaves + the cluster and which source addresses to use for that traffic.\n- kind: CiliumClusterwideEnvoyConfig\n + \ version: v2\n name: ciliumclusterwideenvoyconfigs.cilium.io\n displayName: + Cilium Clusterwide Envoy Config\n description: |\n Cilium Clusterwide + Envoy Config specifies Envoy resources and K8s service mappings\n to be + provisioned into Cilium host proxy instances in cluster context.\n- kind: + CiliumEnvoyConfig\n version: v2\n name: ciliumenvoyconfigs.cilium.io\n displayName: + Cilium Envoy Config\n description: |\n Cilium Envoy Config specifies Envoy + resources and K8s service mappings\n to be provisioned into Cilium host + proxy instances in namespace context.\n- kind: CiliumBGPPeeringPolicy\n version: + v2alpha1\n name: ciliumbgppeeringpolicies.cilium.io\n displayName: Cilium + BGP Peering Policy\n description: |\n Cilium BGP Peering Policy instructs + Cilium to create specific BGP peering\n configurations.\n- kind: CiliumBGPClusterConfig\n + \ version: v2alpha1\n name: ciliumbgpclusterconfigs.cilium.io\n displayName: + Cilium BGP Cluster Config\n description: |\n Cilium BGP Cluster Config + instructs Cilium operator to create specific BGP cluster\n configurations.\n- + kind: CiliumBGPPeerConfig\n version: v2alpha1\n name: ciliumbgppeerconfigs.cilium.io\n + \ displayName: Cilium BGP Peer Config\n description: |\n CiliumBGPPeerConfig + is a common set of BGP peer configurations. It can be referenced \n by + multiple peers from CiliumBGPClusterConfig.\n- kind: CiliumBGPAdvertisement\n + \ version: v2alpha1\n name: ciliumbgpadvertisements.cilium.io\n displayName: + Cilium BGP Advertisement\n description: |\n CiliumBGPAdvertisement is + used to define source of BGP advertisement as well as BGP attributes \n to + be advertised with those prefixes.\n- kind: CiliumBGPNodeConfig\n version: + v2alpha1\n name: ciliumbgpnodeconfigs.cilium.io\n displayName: Cilium BGP + Node Config\n description: |\n CiliumBGPNodeConfig is read only node specific + BGP configuration. 
It is constructed by Cilium operator.\n It will also + contain node local BGP state information.\n- kind: CiliumBGPNodeConfigOverride\n + \ version: v2alpha1\n name: ciliumbgpnodeconfigoverrides.cilium.io\n displayName: + Cilium BGP Node Config Override\n description: |\n CiliumBGPNodeConfigOverride + can be used to override node specific BGP configuration.\n- kind: CiliumLoadBalancerIPPool\n + \ version: v2alpha1\n name: ciliumloadbalancerippools.cilium.io\n displayName: + Cilium Load Balancer IP Pool\n description: |\n Defining a Cilium Load + Balancer IP Pool instructs Cilium to assign IPs to LoadBalancer Services.\n- + kind: CiliumNodeConfig\n version: v2alpha1\n name: ciliumnodeconfigs.cilium.io\n + \ displayName: Cilium Node Configuration\n description: |\n CiliumNodeConfig + is a list of configuration key-value pairs. It is applied to\n nodes indicated + by a label selector.\n- kind: CiliumCIDRGroup\n version: v2alpha1\n name: + ciliumcidrgroups.cilium.io\n displayName: Cilium CIDR Group\n description: + |\n CiliumCIDRGroup is a list of CIDRs that can be referenced as a single + entity from CiliumNetworkPolicies.\n- kind: CiliumL2AnnouncementPolicy\n version: + v2alpha1\n name: ciliuml2announcementpolicies.cilium.io\n displayName: Cilium + L2 Announcement Policy\n description: |\n CiliumL2AnnouncementPolicy is + a policy which determines which service IPs will be announced to\n the + local area network, by which nodes, and via which interfaces.\n- kind: CiliumPodIPPool\n + \ version: v2alpha1\n name: ciliumpodippools.cilium.io\n displayName: Cilium + Pod IP Pool\n description: |\n CiliumPodIPPool defines an IP pool that + can be used for pooled IPAM (i.e. the multi-pool IPAM mode).\n" + apiVersion: v2 + appVersion: 1.16.13 + created: "2025-09-04T10:31:51.105837546Z" + description: eBPF-based Networking, Security, and Observability + digest: c46c2263d7807f9836a6a8af24da049e94368284db048b1195ab9d7da7450a52 + home: https://cilium.io/ + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@main/Documentation/images/logo-solo.svg + keywords: + - BPF + - eBPF + - Kubernetes + - Networking + - Security + - Observability + - Troubleshooting + kubeVersion: '>= 1.21.0-0' + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.16.13.tgz + version: 1.16.13 - annotations: artifacthub.io/crds: "- kind: CiliumNetworkPolicy\n version: v2\n name: ciliumnetworkpolicies.cilium.io\n \ displayName: Cilium Network Policy\n description: |\n Cilium Network @@ -25159,4 +25370,4 @@ entries: urls: - tetragon-0.8.0.tgz version: 0.8.0 -generated: "2025-08-15T13:50:42.749503377Z" +generated: "2025-09-04T10:31:51.094037783Z" diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml index 3880635db128d..1401fa715359a 100644 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -1608,6 +1608,8 @@ definitions: Bridge: description: | Name of the default bridge interface when dockerd's --bridge flag is set. + + Deprecated: This field is only set when the daemon is started with the --bridge flag specified. type: "string" example: "docker0" SandboxID: @@ -2234,6 +2236,10 @@ definitions: password: type: "string" email: + description: | + Email is an optional value associated with the username. + + > **Deprecated**: This field is deprecated since docker 1.11 (API v1.23) and will be removed in a future release. 
type: "string" serveraddress: type: "string" @@ -4392,6 +4398,7 @@ definitions: A counter that triggers an update even if no relevant parameters have been changed. type: "integer" + format: "uint64" Runtime: description: | Runtime is the type of runtime specified for the task executor. diff --git a/vendor/github.com/docker/docker/api/types/build/disk_usage.go b/vendor/github.com/docker/docker/api/types/build/disk_usage.go index e969b6d615f2b..cfd7333272c4b 100644 --- a/vendor/github.com/docker/docker/api/types/build/disk_usage.go +++ b/vendor/github.com/docker/docker/api/types/build/disk_usage.go @@ -1,6 +1,8 @@ package build // CacheDiskUsage contains disk usage for the build cache. +// +// Deprecated: this type is no longer used and will be removed in the next release. type CacheDiskUsage struct { TotalSize int64 Reclaimable int64 diff --git a/vendor/github.com/docker/docker/api/types/container/disk_usage.go b/vendor/github.com/docker/docker/api/types/container/disk_usage.go index 05b6cbe9c7097..d77538c2ab55e 100644 --- a/vendor/github.com/docker/docker/api/types/container/disk_usage.go +++ b/vendor/github.com/docker/docker/api/types/container/disk_usage.go @@ -1,6 +1,8 @@ package container // DiskUsage contains disk usage for containers. +// +// Deprecated: this type is no longer used and will be removed in the next release. type DiskUsage struct { TotalSize int64 Reclaimable int64 diff --git a/vendor/github.com/docker/docker/api/types/container/network_settings.go b/vendor/github.com/docker/docker/api/types/container/network_settings.go index afec0e54323ee..687145f2953a7 100644 --- a/vendor/github.com/docker/docker/api/types/container/network_settings.go +++ b/vendor/github.com/docker/docker/api/types/container/network_settings.go @@ -13,8 +13,11 @@ type NetworkSettings struct { } // NetworkSettingsBase holds networking state for a container when inspecting it. +// +// Deprecated: Most fields in NetworkSettingsBase are deprecated. Fields which aren't deprecated will move to +// NetworkSettings in v29.0, and this struct will be removed. type NetworkSettingsBase struct { - Bridge string // Bridge contains the name of the default bridge interface iff it was set through the daemon --bridge flag. + Bridge string // Deprecated: This field is only set when the daemon is started with the --bridge flag specified. SandboxID string // SandboxID uniquely represents a container's network stack SandboxKey string // SandboxKey identifies the sandbox Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port @@ -35,18 +38,44 @@ type NetworkSettingsBase struct { SecondaryIPv6Addresses []network.Address // Deprecated: This field is never set and will be removed in a future release. } -// DefaultNetworkSettings holds network information -// during the 2 release deprecation period. -// It will be removed in Docker 1.11. +// DefaultNetworkSettings holds the networking state for the default bridge, if the container is connected to that +// network. +// +// Deprecated: this struct is deprecated since Docker v1.11 and will be removed in v29. You should look for the default +// network in NetworkSettings.Networks instead. 
type DefaultNetworkSettings struct { - EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox - Gateway string // Gateway holds the gateway address for the network - GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address - GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address - IPAddress string // IPAddress holds the IPv4 address for the network - IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address - IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 - MacAddress string // MacAddress holds the MAC address for the network + // EndpointID uniquely represents a service endpoint in a Sandbox + // + // Deprecated: This field will be removed in v29. You should look for the default network in NetworkSettings.Networks instead. + EndpointID string + // Gateway holds the gateway address for the network + // + // Deprecated: This field will be removed in v29. You should look for the default network in NetworkSettings.Networks instead. + Gateway string + // GlobalIPv6Address holds network's global IPv6 address + // + // Deprecated: This field will be removed in v29. You should look for the default network in NetworkSettings.Networks instead. + GlobalIPv6Address string + // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address + // + // Deprecated: This field will be removed in v29. You should look for the default network in NetworkSettings.Networks instead. + GlobalIPv6PrefixLen int + // IPAddress holds the IPv4 address for the network + // + // Deprecated: This field will be removed in v29. You should look for the default network in NetworkSettings.Networks instead. + IPAddress string + // IPPrefixLen represents mask length of network's IPv4 address + // + // Deprecated: This field will be removed in v29. You should look for the default network in NetworkSettings.Networks instead. + IPPrefixLen int + // IPv6Gateway holds gateway address specific for IPv6 + // + // Deprecated: This field will be removed in v29. You should look for the default network in NetworkSettings.Networks instead. + IPv6Gateway string + // MacAddress holds the MAC address for the network + // + // Deprecated: This field will be removed in v29. You should look for the default network in NetworkSettings.Networks instead. + MacAddress string } // NetworkSettingsSummary provides a summary of container's networks diff --git a/vendor/github.com/docker/docker/api/types/filters/filters_deprecated.go b/vendor/github.com/docker/docker/api/types/filters/filters_deprecated.go new file mode 100644 index 0000000000000..4504cd7a7fefc --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/filters/filters_deprecated.go @@ -0,0 +1,61 @@ +package filters + +import ( + "encoding/json" + + "github.com/docker/docker/api/types/versions" +) + +// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22 +// then the encoded format will use an older legacy format where the values are a +// list of strings, instead of a set. +// +// Deprecated: do not use in any new code; use ToJSON instead +func ToParamWithVersion(version string, a Args) (string, error) { + out, err := ToJSON(a) + if out == "" || err != nil { + return "", nil + } + if version != "" && versions.LessThan(version, "1.22") { + return encodeLegacyFilters(out) + } + return out, nil +} + +// encodeLegacyFilters encodes Args in the legacy format as used in API v1.21 and older. 
+// where values are a list of strings, instead of a set. +// +// Don't use in any new code; use [filters.ToJSON]] instead. +func encodeLegacyFilters(currentFormat string) (string, error) { + // The Args.fields field is not exported, but used to marshal JSON, + // so we'll marshal to the new format, then unmarshal to get the + // fields, and marshal again. + // + // This is far from optimal, but this code is only used for deprecated + // API versions, so should not be hit commonly. + var argsFields map[string]map[string]bool + err := json.Unmarshal([]byte(currentFormat), &argsFields) + if err != nil { + return "", err + } + + buf, err := json.Marshal(convertArgsToSlice(argsFields)) + if err != nil { + return "", err + } + return string(buf), nil +} + +func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { + m := map[string][]string{} + for k, v := range f { + values := []string{} + for kk := range v { + if v[kk] { + values = append(values, kk) + } + } + m[k] = values + } + return m +} diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go index 86f4bdb28e170..396657bb1921b 100644 --- a/vendor/github.com/docker/docker/api/types/filters/parse.go +++ b/vendor/github.com/docker/docker/api/types/filters/parse.go @@ -8,8 +8,6 @@ import ( "encoding/json" "regexp" "strings" - - "github.com/docker/docker/api/types/versions" ) // Args stores a mapping of keys to a set of multiple values. @@ -63,24 +61,6 @@ func ToJSON(a Args) (string, error) { return string(buf), err } -// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22 -// then the encoded format will use an older legacy format where the values are a -// list of strings, instead of a set. -// -// Deprecated: do not use in any new code; use ToJSON instead -func ToParamWithVersion(version string, a Args) (string, error) { - if a.Len() == 0 { - return "", nil - } - - if version != "" && versions.LessThan(version, "1.22") { - buf, err := json.Marshal(convertArgsToSlice(a.fields)) - return string(buf), err - } - - return ToJSON(a) -} - // FromJSON decodes a JSON encoded string into Args func FromJSON(p string) (Args, error) { args := NewArgs() @@ -320,17 +300,3 @@ func deprecatedArgs(d map[string][]string) map[string]map[string]bool { } return m } - -func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { - m := map[string][]string{} - for k, v := range f { - values := []string{} - for kk := range v { - if v[kk] { - values = append(values, kk) - } - } - m[k] = values - } - return m -} diff --git a/vendor/github.com/docker/docker/api/types/image/disk_usage.go b/vendor/github.com/docker/docker/api/types/image/disk_usage.go index b29d925cac48b..e847386a8d13f 100644 --- a/vendor/github.com/docker/docker/api/types/image/disk_usage.go +++ b/vendor/github.com/docker/docker/api/types/image/disk_usage.go @@ -1,6 +1,8 @@ package image // DiskUsage contains disk usage for images. +// +// Deprecated: this type is no longer used and will be removed in the next release. 
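The filters change above deprecates ToParamWithVersion (now isolated in filters_deprecated.go) in favor of ToJSON. New callers can build and encode filter arguments roughly as in this sketch; the label and status values are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	// Build a filter set; each Arg adds one key/value pair.
	args := filters.NewArgs(
		filters.Arg("label", "app=web"),
		filters.Arg("status", "running"),
	)

	// ToJSON produces the modern "set" encoding used by API >= 1.22;
	// the legacy list encoding now lives only in filters_deprecated.go.
	encoded, err := filters.ToJSON(args)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(encoded)
}
```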
type DiskUsage struct { TotalSize int64 Reclaimable int64 diff --git a/vendor/github.com/docker/docker/api/types/network/endpoint.go b/vendor/github.com/docker/docker/api/types/network/endpoint.go index 167ac70ab56a0..cdc06c6c90011 100644 --- a/vendor/github.com/docker/docker/api/types/network/endpoint.go +++ b/vendor/github.com/docker/docker/api/types/network/endpoint.go @@ -4,8 +4,6 @@ import ( "errors" "fmt" "net" - - "github.com/docker/docker/internal/multierror" ) // EndpointSettings stores the network endpoint details @@ -99,7 +97,7 @@ func (cfg *EndpointIPAMConfig) IsInRange(v4Subnets []NetworkSubnet, v6Subnets [] errs = append(errs, err) } - return multierror.Join(errs...) + return errJoin(errs...) } func validateEndpointIPAddress(epAddr string, ipamSubnets []NetworkSubnet) error { @@ -149,5 +147,5 @@ func (cfg *EndpointIPAMConfig) Validate() error { } } - return multierror.Join(errs...) + return errJoin(errs...) } diff --git a/vendor/github.com/docker/docker/api/types/network/ipam.go b/vendor/github.com/docker/docker/api/types/network/ipam.go index f319e1402b081..f9a9ff9b358fa 100644 --- a/vendor/github.com/docker/docker/api/types/network/ipam.go +++ b/vendor/github.com/docker/docker/api/types/network/ipam.go @@ -4,8 +4,7 @@ import ( "errors" "fmt" "net/netip" - - "github.com/docker/docker/internal/multierror" + "strings" ) // IPAM represents IP Address Management @@ -72,7 +71,7 @@ func ValidateIPAM(ipam *IPAM, enableIPv6 bool) error { } } - if err := multierror.Join(errs...); err != nil { + if err := errJoin(errs...); err != nil { return fmt.Errorf("invalid network config:\n%w", err) } @@ -132,3 +131,43 @@ func validateAddress(address string, subnet netip.Prefix, subnetFamily ipFamily) return nil } + +func errJoin(errs ...error) error { + n := 0 + for _, err := range errs { + if err != nil { + n++ + } + } + if n == 0 { + return nil + } + e := &joinError{ + errs: make([]error, 0, n), + } + for _, err := range errs { + if err != nil { + e.errs = append(e.errs, err) + } + } + return e +} + +type joinError struct { + errs []error +} + +func (e *joinError) Error() string { + if len(e.errs) == 1 { + return strings.TrimSpace(e.errs[0].Error()) + } + stringErrs := make([]string, 0, len(e.errs)) + for _, subErr := range e.errs { + stringErrs = append(stringErrs, strings.ReplaceAll(subErr.Error(), "\n", "\n\t")) + } + return "* " + strings.Join(stringErrs, "\n* ") +} + +func (e *joinError) Unwrap() []error { + return e.errs +} diff --git a/vendor/github.com/docker/docker/api/types/registry/authconfig.go b/vendor/github.com/docker/docker/api/types/registry/authconfig.go index fa9037bdadfd8..4c6d7ab2badf3 100644 --- a/vendor/github.com/docker/docker/api/types/registry/authconfig.go +++ b/vendor/github.com/docker/docker/api/types/registry/authconfig.go @@ -32,8 +32,8 @@ type AuthConfig struct { Auth string `json:"auth,omitempty"` // Email is an optional value associated with the username. - // This field is deprecated and will be removed in a later - // version of docker. + // + // Deprecated: This field is deprecated since docker 1.11 (API v1.23) and will be removed in the next release. 
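With AuthConfig.Email now formally marked deprecated, credentials should be sent without it. A hedged sketch using registry.EncodeAuthConfig (provided by the same package, though not part of this diff) to produce the X-Registry-Auth header value; username, password, and server address are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/api/types/registry"
)

func main() {
	// Email is intentionally omitted: the field is deprecated since docker 1.11 (API v1.23).
	auth := registry.AuthConfig{
		Username:      "someuser", // placeholder
		Password:      "s3cr3t",   // placeholder
		ServerAddress: "https://index.docker.io/v1/",
	}

	// EncodeAuthConfig renders the base64url-encoded JSON used for the
	// X-Registry-Auth request header.
	header, err := registry.EncodeAuthConfig(auth)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(header)
}
```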
Email string `json:"email,omitempty"` ServerAddress string `json:"serveraddress,omitempty"` diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime.go b/vendor/github.com/docker/docker/api/types/swarm/runtime.go index 8a28320f7b854..3fda4ca65d1d5 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime.go +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime.go @@ -1,5 +1,7 @@ package swarm +import "github.com/docker/docker/api/types/swarm/runtime" + // RuntimeType is the type of runtime used for the TaskSpec type RuntimeType string @@ -25,3 +27,11 @@ const ( type NetworkAttachmentSpec struct { ContainerID string } + +// RuntimeSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. +type RuntimeSpec = runtime.PluginSpec + +// RuntimePrivilege describes a permission the user has to accept +// upon installing a plugin. +type RuntimePrivilege = runtime.PluginPrivilege diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go deleted file mode 100644 index 90e572cf9c90b..0000000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go +++ /dev/null @@ -1,3 +0,0 @@ -//go:generate protoc --gogofaster_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto - -package runtime diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go deleted file mode 100644 index 32aaf0d519904..0000000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go +++ /dev/null @@ -1,808 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: plugin.proto - -package runtime - -import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// PluginSpec defines the base payload which clients can specify for creating -// a service with the plugin runtime. 
-type PluginSpec struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` - Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges,proto3" json:"privileges,omitempty"` - Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` - Env []string `protobuf:"bytes,5,rep,name=env,proto3" json:"env,omitempty"` -} - -func (m *PluginSpec) Reset() { *m = PluginSpec{} } -func (m *PluginSpec) String() string { return proto.CompactTextString(m) } -func (*PluginSpec) ProtoMessage() {} -func (*PluginSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_22a625af4bc1cc87, []int{0} -} -func (m *PluginSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PluginSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PluginSpec.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PluginSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_PluginSpec.Merge(m, src) -} -func (m *PluginSpec) XXX_Size() int { - return m.Size() -} -func (m *PluginSpec) XXX_DiscardUnknown() { - xxx_messageInfo_PluginSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_PluginSpec proto.InternalMessageInfo - -func (m *PluginSpec) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PluginSpec) GetRemote() string { - if m != nil { - return m.Remote - } - return "" -} - -func (m *PluginSpec) GetPrivileges() []*PluginPrivilege { - if m != nil { - return m.Privileges - } - return nil -} - -func (m *PluginSpec) GetDisabled() bool { - if m != nil { - return m.Disabled - } - return false -} - -func (m *PluginSpec) GetEnv() []string { - if m != nil { - return m.Env - } - return nil -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. 
-type PluginPrivilege struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - Value []string `protobuf:"bytes,3,rep,name=value,proto3" json:"value,omitempty"` -} - -func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } -func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } -func (*PluginPrivilege) ProtoMessage() {} -func (*PluginPrivilege) Descriptor() ([]byte, []int) { - return fileDescriptor_22a625af4bc1cc87, []int{1} -} -func (m *PluginPrivilege) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PluginPrivilege) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PluginPrivilege.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PluginPrivilege) XXX_Merge(src proto.Message) { - xxx_messageInfo_PluginPrivilege.Merge(m, src) -} -func (m *PluginPrivilege) XXX_Size() int { - return m.Size() -} -func (m *PluginPrivilege) XXX_DiscardUnknown() { - xxx_messageInfo_PluginPrivilege.DiscardUnknown(m) -} - -var xxx_messageInfo_PluginPrivilege proto.InternalMessageInfo - -func (m *PluginPrivilege) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PluginPrivilege) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *PluginPrivilege) GetValue() []string { - if m != nil { - return m.Value - } - return nil -} - -func init() { - proto.RegisterType((*PluginSpec)(nil), "PluginSpec") - proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege") -} - -func init() { proto.RegisterFile("plugin.proto", fileDescriptor_22a625af4bc1cc87) } - -var fileDescriptor_22a625af4bc1cc87 = []byte{ - // 225 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d, - 0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x9a, 0xc1, 0xc8, 0xc5, 0x15, 0x00, 0x16, - 0x08, 0x2e, 0x48, 0x4d, 0x16, 0x12, 0xe2, 0x62, 0xc9, 0x4b, 0xcc, 0x4d, 0x95, 0x60, 0x54, 0x60, - 0xd4, 0xe0, 0x0c, 0x02, 0xb3, 0x85, 0xc4, 0xb8, 0xd8, 0x8a, 0x52, 0x73, 0xf3, 0x4b, 0x52, 0x25, - 0x98, 0xc0, 0xa2, 0x50, 0x9e, 0x90, 0x01, 0x17, 0x57, 0x41, 0x51, 0x66, 0x59, 0x66, 0x4e, 0x6a, - 0x7a, 0x6a, 0xb1, 0x04, 0xb3, 0x02, 0xb3, 0x06, 0xb7, 0x91, 0x80, 0x1e, 0xc4, 0xb0, 0x00, 0x98, - 0x44, 0x10, 0x92, 0x1a, 0x21, 0x29, 0x2e, 0x8e, 0x94, 0xcc, 0xe2, 0xc4, 0xa4, 0x9c, 0xd4, 0x14, - 0x09, 0x16, 0x05, 0x46, 0x0d, 0x8e, 0x20, 0x38, 0x5f, 0x48, 0x80, 0x8b, 0x39, 0x35, 0xaf, 0x4c, - 0x82, 0x55, 0x81, 0x59, 0x83, 0x33, 0x08, 0xc4, 0x54, 0x8a, 0xe5, 0xe2, 0x47, 0x33, 0x0c, 0xab, - 0xf3, 0x14, 0xb8, 0xb8, 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0xa0, - 0x6e, 0x44, 0x16, 0x12, 0x12, 0xe1, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0xbb, 0x91, 0x33, - 0x08, 0xc2, 0x71, 0x92, 0x38, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, - 0x18, 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x24, 0x36, - 0x70, 0xd0, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x37, 0xea, 0xe2, 0xca, 0x2a, 0x01, 0x00, - 0x00, -} - -func (m *PluginSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - 
return dAtA[:n], nil -} - -func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PluginSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Env) > 0 { - for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Env[iNdEx]) - copy(dAtA[i:], m.Env[iNdEx]) - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Env[iNdEx]))) - i-- - dAtA[i] = 0x2a - } - } - if m.Disabled { - i-- - if m.Disabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if len(m.Privileges) > 0 { - for iNdEx := len(m.Privileges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Privileges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPlugin(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Remote) > 0 { - i -= len(m.Remote) - copy(dAtA[i:], m.Remote) - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PluginPrivilege) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Value) > 0 { - for iNdEx := len(m.Value) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Value[iNdEx]) - copy(dAtA[i:], m.Value[iNdEx]) - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Value[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if len(m.Description) > 0 { - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { - offset -= sovPlugin(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *PluginSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - l = len(m.Remote) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - if len(m.Privileges) > 0 { - for _, e := range m.Privileges { - l = e.Size() - n += 1 + l + sovPlugin(uint64(l)) - } - } - if m.Disabled { - n += 2 - } - if len(m.Env) > 0 { - for _, s := range m.Env { - l = len(s) - n += 1 + l + sovPlugin(uint64(l)) - } - } - return n -} - -func (m *PluginPrivilege) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - l = len(m.Description) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - if len(m.Value) > 0 { - for _, s := range m.Value { - l = len(s) - n += 1 + l + sovPlugin(uint64(l)) - } - } - return n -} - -func sovPlugin(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozPlugin(x uint64) (n int) { - return 
sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *PluginSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Remote = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Privileges = append(m.Privileges, &PluginPrivilege{}) - if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Disabled = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPlugin(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPlugin - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPlugin(dAtA[iNdEx:]) - if err != nil { - 
return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPlugin - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipPlugin(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthPlugin - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupPlugin - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthPlugin - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupPlugin = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto deleted file mode 100644 index e311b36ba2cf5..0000000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -// PluginSpec defines the base payload which clients can specify for creating -// a service with the plugin runtime. -message PluginSpec { - string name = 1; - string remote = 2; - repeated PluginPrivilege privileges = 3; - bool disabled = 4; - repeated string env = 5; -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. -message PluginPrivilege { - string name = 1; - string description = 2; - repeated string value = 3; -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/runtime.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/runtime.go new file mode 100644 index 0000000000000..95176b268186b --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/runtime.go @@ -0,0 +1,27 @@ +package runtime + +import "fmt" + +// PluginSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. +type PluginSpec struct { + Name string `json:"name,omitempty"` + Remote string `json:"remote,omitempty"` + Privileges []*PluginPrivilege `json:"privileges,omitempty"` + Disabled bool `json:"disabled,omitempty"` + Env []string `json:"env,omitempty"` +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. 
+type PluginPrivilege struct { + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Value []string `json:"value,omitempty"` +} + +var ( + ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") // Deprecated: this error was only used internally and is no longer used. + ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") // Deprecated: this error was only used internally and is no longer used. + ErrUnexpectedEndOfGroupPlugin = fmt.Errorf("proto: unexpected end of group") // Deprecated: this error was only used internally and is no longer used. +) diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go index 4dc95e8b1ddee..e143f844fa8f4 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/task.go +++ b/vendor/github.com/docker/docker/api/types/swarm/task.go @@ -4,7 +4,6 @@ import ( "time" "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm/runtime" ) // TaskState represents the state of a task. @@ -77,7 +76,7 @@ type TaskSpec struct { // NetworkAttachmentSpec is used if the `Runtime` field is set to // `attachment`. ContainerSpec *ContainerSpec `json:",omitempty"` - PluginSpec *runtime.PluginSpec `json:",omitempty"` + PluginSpec *RuntimeSpec `json:",omitempty"` NetworkAttachmentSpec *NetworkAttachmentSpec `json:",omitempty"` Resources *ResourceRequirements `json:",omitempty"` diff --git a/vendor/github.com/docker/docker/api/types/system/disk_usage.go b/vendor/github.com/docker/docker/api/types/system/disk_usage.go deleted file mode 100644 index 99078cf196d09..0000000000000 --- a/vendor/github.com/docker/docker/api/types/system/disk_usage.go +++ /dev/null @@ -1,17 +0,0 @@ -package system - -import ( - "github.com/docker/docker/api/types/build" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/image" - "github.com/docker/docker/api/types/volume" -) - -// DiskUsage contains response of Engine API for API 1.49 and greater: -// GET "/system/df" -type DiskUsage struct { - Images *image.DiskUsage - Containers *container.DiskUsage - Volumes *volume.DiskUsage - BuildCache *build.CacheDiskUsage -} diff --git a/vendor/github.com/docker/docker/api/types/types_deprecated.go b/vendor/github.com/docker/docker/api/types/types_deprecated.go index 8456a45607e2a..c9c20b8736046 100644 --- a/vendor/github.com/docker/docker/api/types/types_deprecated.go +++ b/vendor/github.com/docker/docker/api/types/types_deprecated.go @@ -46,15 +46,16 @@ type NetworkSettings = container.NetworkSettings // NetworkSettingsBase holds networking state for a container when inspecting it. // -// Deprecated: use [container.NetworkSettingsBase]. -type NetworkSettingsBase = container.NetworkSettingsBase +// Deprecated: [container.NetworkSettingsBase] will be removed in v29. Prefer +// accessing the fields it contains through [container.NetworkSettings]. +type NetworkSettingsBase = container.NetworkSettingsBase //nolint:staticcheck // ignore SA1019: NetworkSettingsBase is deprecated in v28.4. // DefaultNetworkSettings holds network information // during the 2 release deprecation period. // It will be removed in Docker 1.11. // // Deprecated: use [container.DefaultNetworkSettings]. -type DefaultNetworkSettings = container.DefaultNetworkSettings +type DefaultNetworkSettings = container.DefaultNetworkSettings //nolint:staticcheck // ignore SA1019: DefaultNetworkSettings is deprecated in v28.4. 
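Because TaskSpec.PluginSpec is now typed as *swarm.RuntimeSpec (an alias for runtime.PluginSpec), a plugin-runtime task can be described without importing the runtime subpackage. A sketch with placeholder plugin name, remote, and privilege values:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	spec := swarm.TaskSpec{
		Runtime: swarm.RuntimePlugin,
		// RuntimeSpec aliases runtime.PluginSpec, so the old import of
		// api/types/swarm/runtime is no longer needed here.
		PluginSpec: &swarm.RuntimeSpec{
			Name:   "example/sample-plugin",        // placeholder plugin name
			Remote: "example/sample-plugin:latest", // placeholder reference
			Privileges: []*swarm.RuntimePrivilege{
				{Name: "network", Description: "permissions to access a network", Value: []string{"host"}},
			},
		},
	}
	fmt.Printf("%+v\n", spec)
}
```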
// SummaryNetworkSettings provides a summary of container's networks // in /containers/json. diff --git a/vendor/github.com/docker/docker/api/types/volume/disk_usage.go b/vendor/github.com/docker/docker/api/types/volume/disk_usage.go index 3d716c6e00d90..88974303a0b02 100644 --- a/vendor/github.com/docker/docker/api/types/volume/disk_usage.go +++ b/vendor/github.com/docker/docker/api/types/volume/disk_usage.go @@ -1,6 +1,8 @@ package volume // DiskUsage contains disk usage for volumes. +// +// Deprecated: this type is no longer used and will be removed in the next release. type DiskUsage struct { TotalSize int64 Reclaimable int64 diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go index d6e014dddf81f..8acfb7f490e41 100644 --- a/vendor/github.com/docker/docker/client/client.go +++ b/vendor/github.com/docker/docker/client/client.go @@ -463,7 +463,9 @@ func (cli *Client) dialer() func(context.Context) (net.Conn, error) { case "unix": return net.Dial(cli.proto, cli.addr) case "npipe": - return sockets.DialPipe(cli.addr, 32*time.Second) + ctx, cancel := context.WithTimeout(ctx, 32*time.Second) + defer cancel() + return dialPipeContext(ctx, cli.addr) default: if tlsConfig := cli.tlsConfig(); tlsConfig != nil { return tls.Dial(cli.proto, cli.addr, tlsConfig) diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go index e5b921b40642f..1fb9fbfb9e550 100644 --- a/vendor/github.com/docker/docker/client/client_unix.go +++ b/vendor/github.com/docker/docker/client/client_unix.go @@ -2,6 +2,17 @@ package client +import ( + "context" + "net" + "syscall" +) + // DefaultDockerHost defines OS-specific default host if the DOCKER_HOST // (EnvOverrideHost) environment variable is unset or empty. const DefaultDockerHost = "unix:///var/run/docker.sock" + +// dialPipeContext connects to a Windows named pipe. It is not supported on non-Windows. +func dialPipeContext(_ context.Context, _ string) (net.Conn, error) { + return nil, syscall.EAFNOSUPPORT +} diff --git a/vendor/github.com/docker/docker/client/client_windows.go b/vendor/github.com/docker/docker/client/client_windows.go index 19b954b2fd784..b471c0612403e 100644 --- a/vendor/github.com/docker/docker/client/client_windows.go +++ b/vendor/github.com/docker/docker/client/client_windows.go @@ -1,5 +1,17 @@ package client +import ( + "context" + "net" + + "github.com/Microsoft/go-winio" +) + // DefaultDockerHost defines OS-specific default host if the DOCKER_HOST // (EnvOverrideHost) environment variable is unset or empty. const DefaultDockerHost = "npipe:////./pipe/docker_engine" + +// dialPipeContext connects to a Windows named pipe. It is not supported on non-Windows. 
+func dialPipeContext(ctx context.Context, addr string) (net.Conn, error) { + return winio.DialPipeContext(ctx, addr) +} diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go index 2244e0f4b94a4..076954f4c3e80 100644 --- a/vendor/github.com/docker/docker/client/container_stats.go +++ b/vendor/github.com/docker/docker/client/container_stats.go @@ -28,7 +28,7 @@ func (cli *Client) ContainerStats(ctx context.Context, containerID string, strea return container.StatsResponseReader{ Body: resp.Body, - OSType: getDockerOS(resp.Header.Get("Server")), + OSType: resp.Header.Get("Ostype"), }, nil } @@ -51,6 +51,6 @@ func (cli *Client) ContainerStatsOneShot(ctx context.Context, containerID string return container.StatsResponseReader{ Body: resp.Body, - OSType: getDockerOS(resp.Header.Get("Server")), + OSType: resp.Header.Get("Ostype"), }, nil } diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go index 66ca75e4af285..1ed0878bfdb2d 100644 --- a/vendor/github.com/docker/docker/client/image_build.go +++ b/vendor/github.com/docker/docker/client/image_build.go @@ -40,7 +40,7 @@ func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, optio return build.ImageBuildResponse{ Body: resp.Body, - OSType: getDockerOS(resp.Header.Get("Server")), + OSType: resp.Header.Get("Ostype"), }, nil } diff --git a/vendor/github.com/docker/docker/client/utils.go b/vendor/github.com/docker/docker/client/utils.go index 67e1e6934b59d..7b82f185ac58f 100644 --- a/vendor/github.com/docker/docker/client/utils.go +++ b/vendor/github.com/docker/docker/client/utils.go @@ -8,12 +8,9 @@ import ( cerrdefs "github.com/containerd/errdefs" "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/internal/lazyregexp" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) -var headerRegexp = lazyregexp.New(`\ADocker/.+\s\((.+)\)\z`) - type emptyIDError string func (e emptyIDError) InvalidParameter() {} @@ -31,16 +28,6 @@ func trimID(objType, id string) (string, error) { return id, nil } -// getDockerOS returns the operating system based on the server header from the daemon. -func getDockerOS(serverHeader string) string { - var osType string - matches := headerRegexp.FindStringSubmatch(serverHeader) - if len(matches) > 0 { - osType = matches[1] - } - return osType -} - // getFiltersQuery returns a url query with "filters" query term, based on the // filters provided. func getFiltersQuery(f filters.Args) (url.Values, error) { diff --git a/vendor/github.com/docker/docker/internal/lazyregexp/lazyregexp.go b/vendor/github.com/docker/docker/internal/lazyregexp/lazyregexp.go deleted file mode 100644 index 6334edb60dca3..0000000000000 --- a/vendor/github.com/docker/docker/internal/lazyregexp/lazyregexp.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code below was largely copied from golang.org/x/mod@v0.22; -// https://github.com/golang/mod/blob/v0.22.0/internal/lazyregexp/lazyre.go -// with some additional methods added. - -// Package lazyregexp is a thin wrapper over regexp, allowing the use of global -// regexp variables without forcing them to be compiled at init. 
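The stats and build responses above now take OSType straight from the daemon's "Ostype" response header instead of parsing the Server header, but the client surface is unchanged. A minimal one-shot stats read, with a placeholder container name:

```go
package main

import (
	"context"
	"fmt"
	"io"
	"log"
	"os"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// One-shot stats for a placeholder container name.
	stats, err := cli.ContainerStatsOneShot(context.Background(), "my-container")
	if err != nil {
		log.Fatal(err)
	}
	defer stats.Body.Close()

	// OSType is now populated from the "Ostype" header returned by the daemon.
	fmt.Println("daemon OS:", stats.OSType)
	if _, err := io.Copy(os.Stdout, stats.Body); err != nil {
		log.Fatal(err)
	}
}
```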
-package lazyregexp - -import ( - "os" - "regexp" - "strings" - "sync" -) - -// Regexp is a wrapper around [regexp.Regexp], where the underlying regexp will be -// compiled the first time it is needed. -type Regexp struct { - str string - once sync.Once - rx *regexp.Regexp -} - -func (r *Regexp) re() *regexp.Regexp { - r.once.Do(r.build) - return r.rx -} - -func (r *Regexp) build() { - r.rx = regexp.MustCompile(r.str) - r.str = "" -} - -func (r *Regexp) FindSubmatch(s []byte) [][]byte { - return r.re().FindSubmatch(s) -} - -func (r *Regexp) FindAllStringSubmatch(s string, n int) [][]string { - return r.re().FindAllStringSubmatch(s, n) -} - -func (r *Regexp) FindStringSubmatch(s string) []string { - return r.re().FindStringSubmatch(s) -} - -func (r *Regexp) FindStringSubmatchIndex(s string) []int { - return r.re().FindStringSubmatchIndex(s) -} - -func (r *Regexp) ReplaceAllString(src, repl string) string { - return r.re().ReplaceAllString(src, repl) -} - -func (r *Regexp) FindString(s string) string { - return r.re().FindString(s) -} - -func (r *Regexp) FindAllString(s string, n int) []string { - return r.re().FindAllString(s, n) -} - -func (r *Regexp) MatchString(s string) bool { - return r.re().MatchString(s) -} - -func (r *Regexp) ReplaceAllStringFunc(src string, repl func(string) string) string { - return r.re().ReplaceAllStringFunc(src, repl) -} - -func (r *Regexp) SubexpNames() []string { - return r.re().SubexpNames() -} - -var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test") - -// New creates a new lazy regexp, delaying the compiling work until it is first -// needed. If the code is being run as part of tests, the regexp compiling will -// happen immediately. -func New(str string) *Regexp { - lr := &Regexp{str: str} - if inTest { - // In tests, always compile the regexps early. - lr.re() - } - return lr -} diff --git a/vendor/github.com/docker/docker/internal/multierror/multierror.go b/vendor/github.com/docker/docker/internal/multierror/multierror.go deleted file mode 100644 index e899f4de85c90..0000000000000 --- a/vendor/github.com/docker/docker/internal/multierror/multierror.go +++ /dev/null @@ -1,46 +0,0 @@ -package multierror - -import ( - "strings" -) - -// Join is a drop-in replacement for errors.Join with better formatting. 
-func Join(errs ...error) error { - n := 0 - for _, err := range errs { - if err != nil { - n++ - } - } - if n == 0 { - return nil - } - e := &joinError{ - errs: make([]error, 0, n), - } - for _, err := range errs { - if err != nil { - e.errs = append(e.errs, err) - } - } - return e -} - -type joinError struct { - errs []error -} - -func (e *joinError) Error() string { - if len(e.errs) == 1 { - return strings.TrimSpace(e.errs[0].Error()) - } - stringErrs := make([]string, 0, len(e.errs)) - for _, subErr := range e.errs { - stringErrs = append(stringErrs, strings.ReplaceAll(subErr.Error(), "\n", "\n\t")) - } - return "* " + strings.Join(stringErrs, "\n* ") -} - -func (e *joinError) Unwrap() []error { - return e.errs -} diff --git a/vendor/github.com/google/cel-go/cel/env.go b/vendor/github.com/google/cel-go/cel/env.go index bb3014464e522..58819e872a28f 100644 --- a/vendor/github.com/google/cel-go/cel/env.go +++ b/vendor/github.com/google/cel-go/cel/env.go @@ -27,6 +27,7 @@ import ( "github.com/google/cel-go/common/containers" "github.com/google/cel-go/common/decls" "github.com/google/cel-go/common/env" + "github.com/google/cel-go/common/functions" "github.com/google/cel-go/common/stdlib" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" @@ -142,6 +143,9 @@ type Env struct { validators []ASTValidator costOptions []checker.CostOption + funcBindOnce sync.Once + functionBindings []*functions.Overload + // Internal parser representation prsr *parser.Parser prsrOpts []parser.Option @@ -320,18 +324,19 @@ func NewCustomEnv(opts ...EnvOption) (*Env, error) { return nil, err } return (&Env{ - variables: []*decls.VariableDecl{}, - functions: map[string]*decls.FunctionDecl{}, - macros: []parser.Macro{}, - Container: containers.DefaultContainer, - adapter: registry, - provider: registry, - features: map[int]bool{}, - appliedFeatures: map[int]bool{}, - libraries: map[string]SingletonLibrary{}, - validators: []ASTValidator{}, - progOpts: []ProgramOption{}, - costOptions: []checker.CostOption{}, + variables: []*decls.VariableDecl{}, + functions: map[string]*decls.FunctionDecl{}, + functionBindings: []*functions.Overload{}, + macros: []parser.Macro{}, + Container: containers.DefaultContainer, + adapter: registry, + provider: registry, + features: map[int]bool{}, + appliedFeatures: map[int]bool{}, + libraries: map[string]SingletonLibrary{}, + validators: []ASTValidator{}, + progOpts: []ProgramOption{}, + costOptions: []checker.CostOption{}, }).configure(opts) } diff --git a/vendor/github.com/google/cel-go/cel/folding.go b/vendor/github.com/google/cel-go/cel/folding.go index 40d843ecea302..d1ea6b19dbe13 100644 --- a/vendor/github.com/google/cel-go/cel/folding.go +++ b/vendor/github.com/google/cel-go/cel/folding.go @@ -38,7 +38,7 @@ func MaxConstantFoldIterations(limit int) ConstantFoldingOption { } } -// Adds an Activation which provides known values for the folding evaluator +// FoldKnownValues adds an Activation which provides known values for the folding evaluator // // Any values the activation provides will be used by the constant folder and turned into // literals in the AST. 
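The internal multierror package removed above was folded into api/types/network as the unexported errJoin (earlier in this diff), so the validation helpers still report every problem at once. A hedged sketch against the exported ValidateIPAM, using deliberately inconsistent placeholder addresses:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/network"
)

func main() {
	// Placeholder config with two problems: both the IP range and the gateway
	// fall outside the declared subnet.
	ipam := &network.IPAM{
		Config: []network.IPAMConfig{
			{
				Subnet:  "192.168.1.0/24",
				IPRange: "10.10.0.0/16", // not inside Subnet
				Gateway: "10.0.0.1",     // not inside Subnet
			},
		},
	}

	// With more than one underlying error, the joined output is roughly a
	// "* ..." bullet per problem, wrapped in "invalid network config:".
	if err := network.ValidateIPAM(ipam, false); err != nil {
		fmt.Println(err)
	}
}
```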
diff --git a/vendor/github.com/google/cel-go/cel/program.go b/vendor/github.com/google/cel-go/cel/program.go index 24f41a4a77ee0..ec3869bdb4a35 100644 --- a/vendor/github.com/google/cel-go/cel/program.go +++ b/vendor/github.com/google/cel-go/cel/program.go @@ -20,6 +20,7 @@ import ( "sync" "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/functions" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/interpreter" @@ -191,16 +192,25 @@ func newProgram(e *Env, a *ast.AST, opts []ProgramOption) (Program, error) { } } - // Add the function bindings created via Function() options. - for _, fn := range e.functions { - bindings, err := fn.Bindings() - if err != nil { - return nil, err - } - err = disp.Add(bindings...) - if err != nil { - return nil, err + e.funcBindOnce.Do(func() { + var bindings []*functions.Overload + e.functionBindings = []*functions.Overload{} + for _, fn := range e.functions { + bindings, err = fn.Bindings() + if err != nil { + return + } + e.functionBindings = append(e.functionBindings, bindings...) } + }) + if err != nil { + return nil, err + } + + // Add the function bindings created via Function() options. + err = disp.Add(e.functionBindings...) + if err != nil { + return nil, err } // Set the attribute factory after the options have been set. diff --git a/vendor/github.com/google/cel-go/cel/templates/authoring.tmpl b/vendor/github.com/google/cel-go/cel/templates/authoring.tmpl index d6b3da5c6c0be..d0b0133f151f0 100644 --- a/vendor/github.com/google/cel-go/cel/templates/authoring.tmpl +++ b/vendor/github.com/google/cel-go/cel/templates/authoring.tmpl @@ -1,4 +1,8 @@ -{{define "variable"}}{{.Name}} is a {{.Type}} +{{define "variable"}}{{.Name}} is a {{.Type}}{{if .Description}} + +{{range split .Description}} {{.}} +{{end}} +{{- end -}} {{- end -}} {{define "macro" -}} diff --git a/vendor/github.com/google/cel-go/cel/validator.go b/vendor/github.com/google/cel-go/cel/validator.go index 5f06b2dd55f30..952f88f41b8b3 100644 --- a/vendor/github.com/google/cel-go/cel/validator.go +++ b/vendor/github.com/google/cel-go/cel/validator.go @@ -45,6 +45,14 @@ var ( astValidatorFactories = map[string]ASTValidatorFactory{ nestingLimitValidatorName: func(val *env.Validator) (ASTValidator, error) { if limit, found := val.ConfigValue("limit"); found { + // In case of protos, config value is of type by google.protobuf.Value, which numeric values are always a double. + if val, isDouble := limit.(float64); isDouble { + if val != float64(int64(val)) { + return nil, fmt.Errorf("invalid validator: %s, limit value is not a whole number: %v", nestingLimitValidatorName, limit) + } + return ValidateComprehensionNestingLimit(int(val)), nil + } + if val, isInt := limit.(int); isInt { return ValidateComprehensionNestingLimit(val), nil } diff --git a/vendor/github.com/google/cel-go/common/types/pb/type.go b/vendor/github.com/google/cel-go/common/types/pb/type.go index bdd474c95af9a..171494f075a9e 100644 --- a/vendor/github.com/google/cel-go/common/types/pb/type.go +++ b/vendor/github.com/google/cel-go/common/types/pb/type.go @@ -472,7 +472,7 @@ func unwrap(desc description, msg proto.Message) (any, bool, error) { } return v.GetValue(), true, nil } - return msg, false, nil + return unwrapDynamic(desc, msg.ProtoReflect()) } // unwrapDynamic unwraps a reflected protobuf Message value. 
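The env.go and program.go changes above compute the Function() overload bindings once per Env (guarded by funcBindOnce) rather than on every Program construction, which makes reusing one Env for many programs cheaper. A minimal reuse sketch with a single assumed string variable:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/cel-go/cel"
)

func main() {
	// Build the environment once; its function bindings are now computed
	// lazily and cached on the first Program() call.
	env, err := cel.NewEnv(cel.Variable("name", cel.StringType))
	if err != nil {
		log.Fatal(err)
	}

	for _, expr := range []string{`"hello " + name`, `name.size() > 3`} {
		ast, iss := env.Compile(expr)
		if iss != nil && iss.Err() != nil {
			log.Fatal(iss.Err())
		}
		prg, err := env.Program(ast) // reuses the cached bindings
		if err != nil {
			log.Fatal(err)
		}
		out, _, err := prg.Eval(map[string]any{"name": "world"})
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(expr, "=>", out.Value())
	}
}
```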
diff --git a/vendor/github.com/google/cel-go/ext/native.go b/vendor/github.com/google/cel-go/ext/native.go index 661984cbb1271..ceaa274b740ba 100644 --- a/vendor/github.com/google/cel-go/ext/native.go +++ b/vendor/github.com/google/cel-go/ext/native.go @@ -609,7 +609,8 @@ func newNativeTypes(fieldNameHandler NativeTypesFieldNameHandler, rawType reflec var iterateStructMembers func(reflect.Type) iterateStructMembers = func(t reflect.Type) { if k := t.Kind(); k == reflect.Pointer || k == reflect.Slice || k == reflect.Array || k == reflect.Map { - t = t.Elem() + iterateStructMembers(t.Elem()) + return } if t.Kind() != reflect.Struct { return diff --git a/vendor/github.com/petermattis/goid/runtime_go1.23.go b/vendor/github.com/petermattis/goid/runtime_go1.23.go index 146d81734a02d..45da87379f1f1 100644 --- a/vendor/github.com/petermattis/goid/runtime_go1.23.go +++ b/vendor/github.com/petermattis/goid/runtime_go1.23.go @@ -1,5 +1,5 @@ -//go:build gc && go1.23 -// +build gc,go1.23 +//go:build gc && go1.23 && !go1.25 +// +build gc,go1.23,!go1.25 package goid diff --git a/vendor/github.com/petermattis/goid/runtime_go1.25.go b/vendor/github.com/petermattis/goid/runtime_go1.25.go new file mode 100644 index 0000000000000..ae3ce8319b270 --- /dev/null +++ b/vendor/github.com/petermattis/goid/runtime_go1.25.go @@ -0,0 +1,37 @@ +//go:build gc && go1.25 +// +build gc,go1.25 + +package goid + +type stack struct { + lo uintptr + hi uintptr +} + +type gobuf struct { + sp uintptr + pc uintptr + g uintptr + ctxt uintptr + lr uintptr + bp uintptr +} + +type g struct { + stack stack + stackguard0 uintptr + stackguard1 uintptr + + _panic uintptr + _defer uintptr + m uintptr + sched gobuf + syscallsp uintptr + syscallpc uintptr + syscallbp uintptr + stktopsp uintptr + param uintptr + atomicstatus uint32 + stackLock uint32 + goid int64 // Here it is! +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index ad347113c0480..2331b8b4f3b49 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -95,7 +95,8 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const help: help, variableLabels: variableLabels.compile(), } - if !model.IsValidMetricName(model.LabelValue(fqName)) { + //nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme. + if !model.NameValidationScheme.IsValidMetricName(fqName) { d.err = fmt.Errorf("%q is not a valid metric name", fqName) return d } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go index f7f97ef92624a..d273b6640e4e8 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go @@ -67,7 +67,7 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) } // Our current conversion moves to legacy naming, so use legacy validation. 
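The ext/native.go hunk above replaces a single t = t.Elem() with a recursive call, so element types nested more than one container deep (for example map[string][]*Item) still reach the struct walk. A standalone sketch of the same unwrapping idea; structElem and Item are illustrative names, not cel-go code:

```go
package main

import (
	"fmt"
	"reflect"
)

type Item struct{ Name string }

// structElem keeps unwrapping pointer/slice/array/map element types until it
// reaches a non-container type, mirroring the recursive walk in the fix above.
func structElem(t reflect.Type) (reflect.Type, bool) {
	for {
		switch t.Kind() {
		case reflect.Pointer, reflect.Slice, reflect.Array, reflect.Map:
			t = t.Elem()
		case reflect.Struct:
			return t, true
		default:
			return t, false
		}
	}
}

func main() {
	t, ok := structElem(reflect.TypeOf(map[string][]*Item{}))
	fmt.Println(t, ok) // main.Item true
}
```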
- valid := model.IsValidLegacyMetricName(namespace + "_" + subsystem + "_" + name) + valid := model.LegacyValidation.IsValidMetricName(namespace + "_" + subsystem + "_" + name) switch d.Kind { case metrics.KindUint64: case metrics.KindFloat64: diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go index c21911f292d4e..5fe8d3b4d2949 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -184,5 +184,6 @@ func validateLabelValues(vals []string, expectedNumberOfValues int) error { } func checkLabelName(l string) bool { - return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) + //nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme. + return model.NameValidationScheme.IsValidLabelName(l) && !strings.HasPrefix(l, reservedLabelPrefix) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go index 1258508e4f937..80a4d7c35585d 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go @@ -262,7 +262,7 @@ func CollectAndFormat(c prometheus.Collector, format expfmt.FormatType, metricNa // convertReaderToMetricFamily would read from a io.Reader object and convert it to a slice of // dto.MetricFamily. func convertReaderToMetricFamily(reader io.Reader) ([]*dto.MetricFamily, error) { - var tp expfmt.TextParser + tp := expfmt.NewTextParser(model.UTF8Validation) notNormalized, err := tp.TextToMetricFamilies(reader) if err != nil { return nil, fmt.Errorf("converting reader to metric families failed: %w", err) } diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 1448439b7f722..7b762370e270e 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -70,19 +70,34 @@ func ResponseFormat(h http.Header) Format { return FmtUnknown } -// NewDecoder returns a new decoder based on the given input format. -// If the input format does not imply otherwise, a text format decoder is returned. +// NewDecoder returns a new decoder based on the given input format. Metric +// names are validated based on the provided Format -- if the format requires +// escaping, traditional Prometheus validity checking is used. Otherwise, names +// are checked for UTF-8 validity. Supported formats include delimited protobuf +// and Prometheus text format. For historical reasons, this decoder falls back +// to classic text decoding for any other format. This decoder does not fully +// support OpenMetrics although it may often succeed due to the similarities +// between the formats. This decoder may not support the latest features of +// Prometheus text format and is not intended for high-performance applications. 
+// See: https://github.com/prometheus/common/issues/812 func NewDecoder(r io.Reader, format Format) Decoder { + scheme := model.LegacyValidation + if format.ToEscapingScheme() == model.NoEscaping { + scheme = model.UTF8Validation + } switch format.FormatType() { case TypeProtoDelim: - return &protoDecoder{r: bufio.NewReader(r)} + return &protoDecoder{r: bufio.NewReader(r), s: scheme} + case TypeProtoText, TypeProtoCompact: + return &errDecoder{err: fmt.Errorf("format %s not supported for decoding", format)} } - return &textDecoder{r: r} + return &textDecoder{r: r, s: scheme} } // protoDecoder implements the Decoder interface for protocol buffers. type protoDecoder struct { r protodelim.Reader + s model.ValidationScheme } // Decode implements the Decoder interface. @@ -93,7 +108,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { if err := opts.UnmarshalFrom(d.r, v); err != nil { return err } - if !model.IsValidMetricName(model.LabelValue(v.GetName())) { + if !d.s.IsValidMetricName(v.GetName()) { return fmt.Errorf("invalid metric name %q", v.GetName()) } for _, m := range v.GetMetric() { @@ -107,7 +122,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { if !model.LabelValue(l.GetValue()).IsValid() { return fmt.Errorf("invalid label value %q", l.GetValue()) } - if !model.LabelName(l.GetName()).IsValid() { + if !d.s.IsValidLabelName(l.GetName()) { return fmt.Errorf("invalid label name %q", l.GetName()) } } @@ -115,10 +130,20 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { return nil } +// errDecoder is an error-state decoder that always returns the same error. +type errDecoder struct { + err error +} + +func (d *errDecoder) Decode(*dto.MetricFamily) error { + return d.err +} + // textDecoder implements the Decoder interface for the text protocol. type textDecoder struct { r io.Reader fams map[string]*dto.MetricFamily + s model.ValidationScheme err error } @@ -126,7 +151,7 @@ type textDecoder struct { func (d *textDecoder) Decode(v *dto.MetricFamily) error { if d.err == nil { // Read all metrics in one shot. - var p TextParser + p := NewTextParser(d.s) d.fams, d.err = p.TextToMetricFamilies(d.r) // If we don't get an error, store io.EOF for the end. if d.err == nil { diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index d7f3d76f55d97..73c24dfbc9cb5 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -18,14 +18,12 @@ import ( "io" "net/http" + "github.com/munnerz/goautoneg" + dto "github.com/prometheus/client_model/go" "google.golang.org/protobuf/encoding/protodelim" "google.golang.org/protobuf/encoding/prototext" "github.com/prometheus/common/model" - - "github.com/munnerz/goautoneg" - - dto "github.com/prometheus/client_model/go" ) // Encoder types encode metric families into an underlying wire protocol. @@ -61,7 +59,7 @@ func (ec encoderCloser) Close() error { // appropriate accepted type is found, FmtText is returned (which is the // Prometheus text format). This function will never negotiate FmtOpenMetrics, // as the support is still experimental. To include the option to negotiate -// FmtOpenMetrics, use NegotiateOpenMetrics. +// FmtOpenMetrics, use NegotiateIncludingOpenMetrics. 
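With the decode.go changes above, NewDecoder derives the name validation scheme from the format's escaping and now rejects the text/compact protobuf formats outright. A minimal sketch of decoding classic text exposition, assuming the usual expfmt format constants and client_model types:

```go
package main

import (
	"fmt"
	"strings"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	in := strings.NewReader("# TYPE http_requests_total counter\nhttp_requests_total 42\n")
	// The text-plain format implies legacy escaping, so legacy name validation
	// is applied while decoding.
	dec := expfmt.NewDecoder(in, expfmt.NewFormat(expfmt.TypeTextPlain))

	var mf dto.MetricFamily
	if err := dec.Decode(&mf); err != nil {
		panic(err)
	}
	fmt.Println(mf.GetName(), mf.GetMetric()[0].GetCounter().GetValue())
}
```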
func Negotiate(h http.Header) Format { escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String()))) for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { @@ -153,7 +151,7 @@ func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder { case TypeProtoDelim: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := protodelim.MarshalTo(w, v) + _, err := protodelim.MarshalTo(w, model.EscapeMetricFamily(v, escapingScheme)) return err }, close: func() error { return nil }, diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index b26886560d74b..c34c7de432b1d 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -36,9 +36,11 @@ const ( ProtoType = `application/vnd.google.protobuf` ProtoProtocol = `io.prometheus.client.MetricFamily` // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. - ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" - OpenMetricsType = `application/openmetrics-text` + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + OpenMetricsType = `application/openmetrics-text` + //nolint:revive // Allow for underscores. OpenMetricsVersion_0_0_1 = "0.0.1" + //nolint:revive // Allow for underscores. OpenMetricsVersion_1_0_0 = "1.0.0" // The Content-Type values for the different wire protocols. Do not do direct @@ -54,8 +56,10 @@ const ( // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + //nolint:revive // Allow for underscores. FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + //nolint:revive // Allow for underscores. FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` ) @@ -188,8 +192,8 @@ func (f Format) FormatType() FormatType { // Format contains a escaping=allow-utf-8 term, it will select NoEscaping. If a valid // "escaping" term exists, that will be used. Otherwise, the global default will // be returned. -func (format Format) ToEscapingScheme() model.EscapingScheme { - for _, p := range strings.Split(string(format), ";") { +func (f Format) ToEscapingScheme() model.EscapingScheme { + for _, p := range strings.Split(string(f), ";") { toks := strings.Split(p, "=") if len(toks) != 2 { continue diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go index dfac962a4e7e8..0290f6abc40b3 100644 --- a/vendor/github.com/prometheus/common/expfmt/fuzz.go +++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -17,7 +17,11 @@ package expfmt -import "bytes" +import ( + "bytes" + + "github.com/prometheus/common/model" +) // Fuzz text metric parser with with github.com/dvyukov/go-fuzz: // @@ -26,9 +30,8 @@ import "bytes" // // Further input samples should go in the folder fuzz/corpus. 
func Fuzz(in []byte) int { - parser := TextParser{} + parser := NewTextParser(model.UTF8Validation) _, err := parser.TextToMetricFamilies(bytes.NewReader(in)) - if err != nil { return 0 } diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index a21ed4ec1f8cd..8dbf6d04ed651 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -22,11 +22,10 @@ import ( "strconv" "strings" + dto "github.com/prometheus/client_model/go" "google.golang.org/protobuf/types/known/timestamppb" "github.com/prometheus/common/model" - - dto "github.com/prometheus/client_model/go" ) type encoderOption struct { @@ -249,7 +248,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E // Finally the samples, one line for each. if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") { - compliantName = compliantName + "_total" + compliantName += "_total" } for _, metric := range in.Metric { switch metricType { @@ -477,7 +476,7 @@ func writeOpenMetricsNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces, quoted. - if !model.IsValidLegacyMetricName(name) { + if !model.LegacyValidation.IsValidMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ @@ -641,11 +640,11 @@ func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { if err != nil { return written, err } - err = (*e).Timestamp.CheckValid() + err = e.Timestamp.CheckValid() if err != nil { return written, err } - ts := (*e).Timestamp.AsTime() + ts := e.Timestamp.AsTime() // TODO(beorn7): Format this directly from components of ts to // avoid overflow/underflow and precision issues of the float // conversion. diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index 4b86434b33275..c4e9c1bbc3a79 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -22,9 +22,9 @@ import ( "strings" "sync" - "github.com/prometheus/common/model" - dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/common/model" ) // enhancedWriter has all the enhanced write functions needed here. bufio.Writer @@ -354,7 +354,7 @@ func writeNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces. - if !model.IsValidLegacyMetricName(name) { + if !model.LegacyValidation.IsValidMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ @@ -498,7 +498,7 @@ func writeInt(w enhancedWriter, i int64) (int, error) { // writeName writes a string as-is if it complies with the legacy naming // scheme, or escapes it in double quotes if not. 
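Both openmetrics_create.go and text_create.go now consult model.LegacyValidation.IsValidMetricName to decide whether a name can be written bare or has to be quoted inside the braces. A rough standalone illustration of that decision; renderName is a hypothetical helper, not the library's writeName:

```go
package main

import (
	"fmt"
	"strconv"

	"github.com/prometheus/common/model"
)

// renderName shows the rule the encoders apply: legacy-valid names are emitted
// bare, anything else is quoted (inside braces in the real exposition format).
func renderName(name string) string {
	if model.LegacyValidation.IsValidMetricName(name) {
		return name
	}
	return strconv.Quote(name)
}

func main() {
	fmt.Println(renderName("http_requests_total")) // http_requests_total
	fmt.Println(renderName("http.requests.total")) // "http.requests.total"
}
```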
func writeName(w enhancedWriter, name string) (int, error) { - if model.IsValidLegacyMetricName(name) { + if model.LegacyValidation.IsValidMetricName(name) { return w.WriteString(name) } var written int diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 4067978a178eb..8f2edde32447f 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -78,6 +78,14 @@ type TextParser struct { // These indicate if the metric name from the current line being parsed is inside // braces and if that metric name was found respectively. currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool + // scheme sets the desired ValidationScheme for names. Defaults to the invalid + // UnsetValidation. + scheme model.ValidationScheme +} + +// NewTextParser returns a new TextParser with the provided nameValidationScheme. +func NewTextParser(nameValidationScheme model.ValidationScheme) TextParser { + return TextParser{scheme: nameValidationScheme} } // TextToMetricFamilies reads 'in' as the simple and flat text-based exchange @@ -126,6 +134,7 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF func (p *TextParser) reset(in io.Reader) { p.metricFamiliesByName = map[string]*dto.MetricFamily{} + p.currentLabelPairs = nil if p.buf == nil { p.buf = bufio.NewReader(in) } else { @@ -216,6 +225,9 @@ func (p *TextParser) startComment() stateFn { return nil } p.setOrCreateCurrentMF() + if p.err != nil { + return nil + } if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } @@ -244,6 +256,9 @@ func (p *TextParser) readingMetricName() stateFn { return nil } p.setOrCreateCurrentMF() + if p.err != nil { + return nil + } // Now is the time to fix the type if it hasn't happened yet. if p.currentMF.Type == nil { p.currentMF.Type = dto.MetricType_UNTYPED.Enum() @@ -311,6 +326,9 @@ func (p *TextParser) startLabelName() stateFn { switch p.currentByte { case ',': p.setOrCreateCurrentMF() + if p.err != nil { + return nil + } if p.currentMF.Type == nil { p.currentMF.Type = dto.MetricType_UNTYPED.Enum() } @@ -319,6 +337,10 @@ func (p *TextParser) startLabelName() stateFn { return p.startLabelName case '}': p.setOrCreateCurrentMF() + if p.err != nil { + p.currentLabelPairs = nil + return nil + } if p.currentMF.Type == nil { p.currentMF.Type = dto.MetricType_UNTYPED.Enum() } @@ -341,6 +363,12 @@ func (p *TextParser) startLabelName() stateFn { p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) + p.currentLabelPairs = nil + return nil + } + if !p.scheme.IsValidLabelName(p.currentLabelPair.GetName()) { + p.parseError(fmt.Sprintf("invalid label name %q", p.currentLabelPair.GetName())) + p.currentLabelPairs = nil return nil } // Special summary/histogram treatment. 
Don't add 'quantile' and 'le' @@ -353,13 +381,12 @@ func (p *TextParser) startLabelName() stateFn { labels := make(map[string]struct{}) for _, l := range p.currentLabelPairs { lName := l.GetName() - if _, exists := labels[lName]; !exists { - labels[lName] = struct{}{} - } else { + if _, exists := labels[lName]; exists { p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) p.currentLabelPairs = nil return nil } + labels[lName] = struct{}{} } return p.startLabelValue } @@ -440,7 +467,8 @@ func (p *TextParser) readingValue() stateFn { // When we are here, we have read all the labels, so for the // special case of a summary/histogram, we can finally find out // if the metric already exists. - if p.currentMF.GetType() == dto.MetricType_SUMMARY { + switch p.currentMF.GetType() { + case dto.MetricType_SUMMARY: signature := model.LabelsToSignature(p.currentLabels) if summary := p.summaries[signature]; summary != nil { p.currentMetric = summary @@ -448,7 +476,7 @@ func (p *TextParser) readingValue() stateFn { p.summaries[signature] = p.currentMetric p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) } - } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + case dto.MetricType_HISTOGRAM: signature := model.LabelsToSignature(p.currentLabels) if histogram := p.histograms[signature]; histogram != nil { p.currentMetric = histogram @@ -456,7 +484,7 @@ func (p *TextParser) readingValue() stateFn { p.histograms[signature] = p.currentMetric p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) } - } else { + default: p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) } if p.readTokenUntilWhitespace(); p.err != nil { @@ -805,6 +833,10 @@ func (p *TextParser) setOrCreateCurrentMF() { p.currentIsHistogramCount = false p.currentIsHistogramSum = false name := p.currentToken.String() + if !p.scheme.IsValidMetricName(name) { + p.parseError(fmt.Sprintf("invalid metric name %q", name)) + return + } if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { return } diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go index de83afe93e90f..dfeb34be5f3ae 100644 --- a/vendor/github.com/prometheus/common/model/labels.go +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -32,6 +32,12 @@ const ( // MetricNameLabel is the label name indicating the metric name of a // timeseries. MetricNameLabel = "__name__" + // MetricTypeLabel is the label name indicating the metric type of + // timeseries as per the PROM-39 proposal. + MetricTypeLabel = "__type__" + // MetricUnitLabel is the label name indicating the metric unit of + // timeseries as per the PROM-39 proposal. + MetricUnitLabel = "__unit__" // SchemeLabel is the name of the label that holds the scheme on which to // scrape a target. @@ -100,34 +106,21 @@ type LabelName string // IsValid returns true iff the name matches the pattern of LabelNameRE when // NameValidationScheme is set to LegacyValidation, or valid UTF-8 if // NameValidationScheme is set to UTF8Validation. +// +// Deprecated: This method should not be used and may be removed in the future. +// Use [ValidationScheme.IsValidLabelName] instead. 
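text_parse.go gains a validation scheme field plus a NewTextParser constructor, and setOrCreateCurrentMF/startLabelName now reject names that fail that scheme. A minimal sketch of parsing text exposition with UTF-8 validation, using only the constructor added above; the metric and label names are illustrative:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	p := expfmt.NewTextParser(model.UTF8Validation)
	fams, err := p.TextToMetricFamilies(strings.NewReader(
		"# TYPE room_temperature_celsius gauge\nroom_temperature_celsius{room=\"kitchen\"} 21.5\n"))
	if err != nil {
		panic(err)
	}
	for name, fam := range fams {
		fmt.Println(name, fam.GetMetric()[0].GetGauge().GetValue())
	}
}
```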
func (ln LabelName) IsValid() bool { - if len(ln) == 0 { - return false - } - switch NameValidationScheme { - case LegacyValidation: - return ln.IsValidLegacy() - case UTF8Validation: - return utf8.ValidString(string(ln)) - default: - panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) - } + return NameValidationScheme.IsValidLabelName(string(ln)) } // IsValidLegacy returns true iff name matches the pattern of LabelNameRE for // legacy names. It does not use LabelNameRE for the check but a much faster // hardcoded implementation. +// +// Deprecated: This method should not be used and may be removed in the future. +// Use [LegacyValidation.IsValidLabelName] instead. func (ln LabelName) IsValidLegacy() bool { - if len(ln) == 0 { - return false - } - for i, b := range ln { - // TODO: Apply De Morgan's law. Make sure there are tests for this. - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { //nolint:staticcheck - return false - } - } - return true + return LegacyValidation.IsValidLabelName(string(ln)) } // UnmarshalYAML implements the yaml.Unmarshaler interface. diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go index d0ad88da3346c..9de47b2568eea 100644 --- a/vendor/github.com/prometheus/common/model/labelset.go +++ b/vendor/github.com/prometheus/common/model/labelset.go @@ -114,10 +114,10 @@ func (ls LabelSet) Clone() LabelSet { } // Merge is a helper function to non-destructively merge two label sets. -func (l LabelSet) Merge(other LabelSet) LabelSet { - result := make(LabelSet, len(l)) +func (ls LabelSet) Merge(other LabelSet) LabelSet { + result := make(LabelSet, len(ls)) - for k, v := range l { + for k, v := range ls { result[k] = v } @@ -140,7 +140,7 @@ func (ls LabelSet) FastFingerprint() Fingerprint { } // UnmarshalJSON implements the json.Unmarshaler interface. -func (l *LabelSet) UnmarshalJSON(b []byte) error { +func (ls *LabelSet) UnmarshalJSON(b []byte) error { var m map[LabelName]LabelValue if err := json.Unmarshal(b, &m); err != nil { return err @@ -153,6 +153,6 @@ func (l *LabelSet) UnmarshalJSON(b []byte) error { return fmt.Errorf("%q is not a valid label name", ln) } } - *l = LabelSet(m) + *ls = LabelSet(m) return nil } diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index a6b01755bd4c8..3feebf328ae61 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -14,6 +14,7 @@ package model import ( + "encoding/json" "errors" "fmt" "regexp" @@ -23,6 +24,7 @@ import ( "unicode/utf8" dto "github.com/prometheus/client_model/go" + "go.yaml.in/yaml/v2" "google.golang.org/protobuf/proto" ) @@ -62,16 +64,151 @@ var ( type ValidationScheme int const ( + // UnsetValidation represents an undefined ValidationScheme. + // Should not be used in practice. + UnsetValidation ValidationScheme = iota + // LegacyValidation is a setting that requires that all metric and label names // conform to the original Prometheus character requirements described by // MetricNameRE and LabelNameRE. - LegacyValidation ValidationScheme = iota + LegacyValidation // UTF8Validation only requires that metric and label names be valid UTF-8 // strings. 
UTF8Validation ) +var _ interface { + yaml.Marshaler + yaml.Unmarshaler + json.Marshaler + json.Unmarshaler + fmt.Stringer +} = new(ValidationScheme) + +// String returns the string representation of s. +func (s ValidationScheme) String() string { + switch s { + case UnsetValidation: + return "unset" + case LegacyValidation: + return "legacy" + case UTF8Validation: + return "utf8" + default: + panic(fmt.Errorf("unhandled ValidationScheme: %d", s)) + } +} + +// MarshalYAML implements the yaml.Marshaler interface. +func (s ValidationScheme) MarshalYAML() (any, error) { + switch s { + case UnsetValidation: + return "", nil + case LegacyValidation, UTF8Validation: + return s.String(), nil + default: + panic(fmt.Errorf("unhandled ValidationScheme: %d", s)) + } +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (s *ValidationScheme) UnmarshalYAML(unmarshal func(any) error) error { + var scheme string + if err := unmarshal(&scheme); err != nil { + return err + } + return s.Set(scheme) +} + +// MarshalJSON implements the json.Marshaler interface. +func (s ValidationScheme) MarshalJSON() ([]byte, error) { + switch s { + case UnsetValidation: + return json.Marshal("") + case UTF8Validation, LegacyValidation: + return json.Marshal(s.String()) + default: + return nil, fmt.Errorf("unhandled ValidationScheme: %d", s) + } +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (s *ValidationScheme) UnmarshalJSON(bytes []byte) error { + var repr string + if err := json.Unmarshal(bytes, &repr); err != nil { + return err + } + return s.Set(repr) +} + +// Set implements the pflag.Value interface. +func (s *ValidationScheme) Set(text string) error { + switch text { + case "": + // Don't change the value. + case LegacyValidation.String(): + *s = LegacyValidation + case UTF8Validation.String(): + *s = UTF8Validation + default: + return fmt.Errorf("unrecognized ValidationScheme: %q", text) + } + return nil +} + +// IsValidMetricName returns whether metricName is valid according to s. +func (s ValidationScheme) IsValidMetricName(metricName string) bool { + switch s { + case LegacyValidation: + if len(metricName) == 0 { + return false + } + for i, b := range metricName { + if !isValidLegacyRune(b, i) { + return false + } + } + return true + case UTF8Validation: + if len(metricName) == 0 { + return false + } + return utf8.ValidString(metricName) + default: + panic(fmt.Sprintf("Invalid name validation scheme requested: %s", s.String())) + } +} + +// IsValidLabelName returns whether labelName is valid according to s. +func (s ValidationScheme) IsValidLabelName(labelName string) bool { + switch s { + case LegacyValidation: + if len(labelName) == 0 { + return false + } + for i, b := range labelName { + // TODO: Apply De Morgan's law. Make sure there are tests for this. + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { //nolint:staticcheck + return false + } + } + return true + case UTF8Validation: + if len(labelName) == 0 { + return false + } + return utf8.ValidString(labelName) + default: + panic(fmt.Sprintf("Invalid name validation scheme requested: %s", s)) + } +} + +// Type implements the pflag.Value interface. +func (ValidationScheme) Type() string { + return "validationScheme" +} + type EscapingScheme int const ( @@ -101,7 +238,7 @@ const ( // Accept header, the default NameEscapingScheme will be used. EscapingKey = "escaping" - // Possible values for Escaping Key: + // Possible values for Escaping Key. 
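The ValidationScheme methods added above (String, Set, the JSON/YAML marshalers, IsValidMetricName, IsValidLabelName, Type) make the scheme usable directly as a config or flag value. A small sketch relying only on what the hunk defines:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	var scheme model.ValidationScheme
	// Set (together with Type) satisfies the pflag.Value interface, so the
	// scheme can back a CLI flag or a config field.
	if err := scheme.Set("utf8"); err != nil {
		panic(err)
	}

	fmt.Println(scheme.IsValidMetricName("http.requests.total"))                // true under UTF-8 rules
	fmt.Println(model.LegacyValidation.IsValidMetricName("http.requests.total")) // false: '.' is not allowed

	b, _ := json.Marshal(scheme)
	fmt.Println(string(b)) // "utf8"
}
```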
AllowUTF8 = "allow-utf-8" // No escaping required. EscapeUnderscores = "underscores" EscapeDots = "dots" @@ -175,34 +312,22 @@ func (m Metric) FastFingerprint() Fingerprint { // IsValidMetricName returns true iff name matches the pattern of MetricNameRE // for legacy names, and iff it's valid UTF-8 if the UTF8Validation scheme is // selected. +// +// Deprecated: This function should not be used and might be removed in the future. +// Use [ValidationScheme.IsValidMetricName] instead. func IsValidMetricName(n LabelValue) bool { - switch NameValidationScheme { - case LegacyValidation: - return IsValidLegacyMetricName(string(n)) - case UTF8Validation: - if len(n) == 0 { - return false - } - return utf8.ValidString(string(n)) - default: - panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) - } + return NameValidationScheme.IsValidMetricName(string(n)) } // IsValidLegacyMetricName is similar to IsValidMetricName but always uses the // legacy validation scheme regardless of the value of NameValidationScheme. // This function, however, does not use MetricNameRE for the check but a much // faster hardcoded implementation. +// +// Deprecated: This function should not be used and might be removed in the future. +// Use [LegacyValidation.IsValidMetricName] instead. func IsValidLegacyMetricName(n string) bool { - if len(n) == 0 { - return false - } - for i, b := range n { - if !isValidLegacyRune(b, i) { - return false - } - } - return true + return LegacyValidation.IsValidMetricName(n) } // EscapeMetricFamily escapes the given metric names and labels with the given @@ -310,13 +435,14 @@ func EscapeName(name string, scheme EscapingScheme) string { case DotsEscaping: // Do not early return for legacy valid names, we still escape underscores. for i, b := range name { - if b == '_' { + switch { + case b == '_': escaped.WriteString("__") - } else if b == '.' { + case b == '.': escaped.WriteString("_dot_") - } else if isValidLegacyRune(b, i) { + case isValidLegacyRune(b, i): escaped.WriteRune(b) - } else { + default: escaped.WriteString("__") } } @@ -327,13 +453,14 @@ func EscapeName(name string, scheme EscapingScheme) string { } escaped.WriteString("U__") for i, b := range name { - if b == '_' { + switch { + case b == '_': escaped.WriteString("__") - } else if isValidLegacyRune(b, i) { + case isValidLegacyRune(b, i): escaped.WriteRune(b) - } else if !utf8.ValidRune(b) { + case !utf8.ValidRune(b): escaped.WriteString("_FFFD_") - } else { + default: escaped.WriteRune('_') escaped.WriteString(strconv.FormatInt(int64(b), 16)) escaped.WriteRune('_') @@ -345,7 +472,7 @@ func EscapeName(name string, scheme EscapingScheme) string { } } -// lower function taken from strconv.atoi +// lower function taken from strconv.atoi. 
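The escaping switches above rewrite metric names per EscapingScheme. A quick sketch of what each scheme produces for a dotted name; the scheme constants are the model package's existing EscapingScheme values, and the outputs in the comments follow the logic shown in the hunk:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	name := "http.requests.total"
	fmt.Println(model.EscapeName(name, model.UnderscoreEscaping))    // http_requests_total
	fmt.Println(model.EscapeName(name, model.DotsEscaping))          // http_dot_requests_dot_total
	fmt.Println(model.EscapeName(name, model.ValueEncodingEscaping)) // U__http_2e_requests_2e_total
}
```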
func lower(c byte) byte { return c | ('x' - 'X') } @@ -409,11 +536,12 @@ func UnescapeName(name string, scheme EscapingScheme) string { } r := lower(escapedName[i]) utf8Val *= 16 - if r >= '0' && r <= '9' { + switch { + case r >= '0' && r <= '9': utf8Val += uint(r) - '0' - } else if r >= 'a' && r <= 'f' { + case r >= 'a' && r <= 'f': utf8Val += uint(r) - 'a' + 10 - } else { + default: return name } i++ diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go index fed9e87b9151e..1730b0fdc1210 100644 --- a/vendor/github.com/prometheus/common/model/time.go +++ b/vendor/github.com/prometheus/common/model/time.go @@ -126,14 +126,14 @@ func (t *Time) UnmarshalJSON(b []byte) error { p := strings.Split(string(b), ".") switch len(p) { case 1: - v, err := strconv.ParseInt(string(p[0]), 10, 64) + v, err := strconv.ParseInt(p[0], 10, 64) if err != nil { return err } *t = Time(v * second) case 2: - v, err := strconv.ParseInt(string(p[0]), 10, 64) + v, err := strconv.ParseInt(p[0], 10, 64) if err != nil { return err } @@ -143,7 +143,7 @@ func (t *Time) UnmarshalJSON(b []byte) error { if prec < 0 { p[1] = p[1][:dotPrecision] } else if prec > 0 { - p[1] = p[1] + strings.Repeat("0", prec) + p[1] += strings.Repeat("0", prec) } va, err := strconv.ParseInt(p[1], 10, 32) @@ -170,15 +170,15 @@ func (t *Time) UnmarshalJSON(b []byte) error { // This type should not propagate beyond the scope of input/output processing. type Duration time.Duration -// Set implements pflag/flag.Value +// Set implements pflag/flag.Value. func (d *Duration) Set(s string) error { var err error *d, err = ParseDuration(s) return err } -// Type implements pflag.Value -func (d *Duration) Type() string { +// Type implements pflag.Value. +func (*Duration) Type() string { return "duration" } diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go index 8050637d82230..a9995a37eeeaa 100644 --- a/vendor/github.com/prometheus/common/model/value.go +++ b/vendor/github.com/prometheus/common/model/value.go @@ -191,7 +191,8 @@ func (ss SampleStream) String() string { } func (ss SampleStream) MarshalJSON() ([]byte, error) { - if len(ss.Histograms) > 0 && len(ss.Values) > 0 { + switch { + case len(ss.Histograms) > 0 && len(ss.Values) > 0: v := struct { Metric Metric `json:"metric"` Values []SamplePair `json:"values"` @@ -202,7 +203,7 @@ func (ss SampleStream) MarshalJSON() ([]byte, error) { Histograms: ss.Histograms, } return json.Marshal(&v) - } else if len(ss.Histograms) > 0 { + case len(ss.Histograms) > 0: v := struct { Metric Metric `json:"metric"` Histograms []SampleHistogramPair `json:"histograms"` @@ -211,7 +212,7 @@ func (ss SampleStream) MarshalJSON() ([]byte, error) { Histograms: ss.Histograms, } return json.Marshal(&v) - } else { + default: v := struct { Metric Metric `json:"metric"` Values []SamplePair `json:"values"` @@ -258,7 +259,7 @@ func (s Scalar) String() string { // MarshalJSON implements json.Marshaler. func (s Scalar) MarshalJSON() ([]byte, error) { v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) - return json.Marshal([...]interface{}{s.Timestamp, string(v)}) + return json.Marshal([...]interface{}{s.Timestamp, v}) } // UnmarshalJSON implements json.Unmarshaler. 
@@ -349,9 +350,9 @@ func (m Matrix) Len() int { return len(m) } func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } -func (mat Matrix) String() string { - matCp := make(Matrix, len(mat)) - copy(matCp, mat) +func (m Matrix) String() string { + matCp := make(Matrix, len(m)) + copy(matCp, m) sort.Sort(matCp) strs := make([]string, len(matCp)) diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go index 895e6a3e8393e..91ce5b7a45c21 100644 --- a/vendor/github.com/prometheus/common/model/value_histogram.go +++ b/vendor/github.com/prometheus/common/model/value_histogram.go @@ -86,22 +86,22 @@ func (s *HistogramBucket) Equal(o *HistogramBucket) bool { return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count) } -func (b HistogramBucket) String() string { +func (s HistogramBucket) String() string { var sb strings.Builder - lowerInclusive := b.Boundaries == 1 || b.Boundaries == 3 - upperInclusive := b.Boundaries == 0 || b.Boundaries == 3 + lowerInclusive := s.Boundaries == 1 || s.Boundaries == 3 + upperInclusive := s.Boundaries == 0 || s.Boundaries == 3 if lowerInclusive { sb.WriteRune('[') } else { sb.WriteRune('(') } - fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper) + fmt.Fprintf(&sb, "%g,%g", s.Lower, s.Upper) if upperInclusive { sb.WriteRune(']') } else { sb.WriteRune(')') } - fmt.Fprintf(&sb, ":%v", b.Count) + fmt.Fprintf(&sb, ":%v", s.Count) return sb.String() } diff --git a/vendor/github.com/prometheus/common/model/value_type.go b/vendor/github.com/prometheus/common/model/value_type.go index 726c50ee638c6..078910f46b732 100644 --- a/vendor/github.com/prometheus/common/model/value_type.go +++ b/vendor/github.com/prometheus/common/model/value_type.go @@ -66,8 +66,8 @@ func (et *ValueType) UnmarshalJSON(b []byte) error { return nil } -func (e ValueType) String() string { - switch e { +func (et ValueType) String() string { + switch et { case ValNone: return "" case ValScalar: diff --git a/vendor/github.com/sasha-s/go-deadlock/Readme.md b/vendor/github.com/sasha-s/go-deadlock/Readme.md index 792d8a205a439..a1eb793c00ded 100644 --- a/vendor/github.com/sasha-s/go-deadlock/Readme.md +++ b/vendor/github.com/sasha-s/go-deadlock/Readme.md @@ -1,4 +1,4 @@ -# Online deadlock detection in go (golang). [![Try it online](https://img.shields.io/badge/try%20it-online-blue.svg)](https://wandbox.org/permlink/hJc6QCZowxbNm9WW) [![Docs](https://godoc.org/github.com/sasha-s/go-deadlock?status.svg)](https://godoc.org/github.com/sasha-s/go-deadlock) [![Build Status](https://travis-ci.com/sasha-s/go-deadlock.svg?branch=master)](https://travis-ci.com/sasha-s/go-deadlock) [![codecov](https://codecov.io/gh/sasha-s/go-deadlock/branch/master/graph/badge.svg)](https://codecov.io/gh/sasha-s/go-deadlock) [![version](https://badge.fury.io/gh/sasha-s%2Fgo-deadlock.svg)](https://github.com/sasha-s/go-deadlock/releases) [![Go Report Card](https://goreportcard.com/badge/github.com/sasha-s/go-deadlock)](https://goreportcard.com/report/github.com/sasha-s/go-deadlock) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) +# Online deadlock detection in go (golang). 
[![Try it online](https://img.shields.io/badge/try%20it-online-blue.svg)](https://wandbox.org/permlink/hJc6QCZowxbNm9WW) [![Docs](https://godoc.org/github.com/sasha-s/go-deadlock?status.svg)](https://godoc.org/github.com/sasha-s/go-deadlock) [![codecov](https://codecov.io/gh/sasha-s/go-deadlock/branch/master/graph/badge.svg)](https://codecov.io/gh/sasha-s/go-deadlock) [![version](https://badge.fury.io/gh/sasha-s%2Fgo-deadlock.svg)](https://github.com/sasha-s/go-deadlock/releases) [![Go Report Card](https://goreportcard.com/badge/github.com/sasha-s/go-deadlock)](https://goreportcard.com/report/github.com/sasha-s/go-deadlock) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) ## Why Deadlocks happen and are painful to debug. diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml index 2c8f4808c1a32..6acf8ab1ea049 100644 --- a/vendor/github.com/spf13/cobra/.golangci.yml +++ b/vendor/github.com/spf13/cobra/.golangci.yml @@ -12,14 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. +version: "2" + run: - deadline: 5m + timeout: 5m + +formatters: + enable: + - gofmt + - goimports linters: - disable-all: true + default: none enable: #- bodyclose - # - deadcode ! deprecated since v1.49.0; replaced by 'unused' #- depguard #- dogsled #- dupl @@ -30,28 +36,24 @@ linters: - goconst - gocritic #- gocyclo - - gofmt - - goimports - #- gomnd #- goprintffuncname - gosec - - gosimple - govet - ineffassign #- lll - misspell + #- mnd #- nakedret #- noctx - nolintlint #- rowserrcheck - #- scopelint - staticcheck - #- structcheck ! deprecated since v1.49.0; replaced by 'unused' - - stylecheck - #- typecheck - unconvert #- unparam - unused - # - varcheck ! deprecated since v1.49.0; replaced by 'unused' #- whitespace - fast: false + exclusions: + presets: + - common-false-positives + - legacy + - std-error-handling diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md index 71757151c333b..8416275f48ee0 100644 --- a/vendor/github.com/spf13/cobra/README.md +++ b/vendor/github.com/spf13/cobra/README.md @@ -1,8 +1,14 @@ - -![cobra logo](https://github.com/user-attachments/assets/cbc3adf8-0dff-46e9-a88d-5e2d971c169e) +
+ +cobra-logo + +
Cobra is a library for creating powerful modern CLI applications. +Visit Cobra.dev for extensive documentation + + Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/), [Hugo](https://gohugo.io), and [GitHub CLI](https://github.com/cli/cli) to name a few. [This list](site/content/projects_using_cobra.md) contains a more extensive list of projects using Cobra. @@ -11,6 +17,20 @@ name a few. [This list](site/content/projects_using_cobra.md) contains a more ex [![Go Reference](https://pkg.go.dev/badge/github.com/spf13/cobra.svg)](https://pkg.go.dev/github.com/spf13/cobra) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra) [![Slack](https://img.shields.io/badge/Slack-cobra-brightgreen)](https://gophers.slack.com/archives/CD3LP1199) +
+
+ Supported by: +
+
+ + Warp sponsorship + + +### [Warp, the AI terminal for devs](https://www.warp.dev/cobra) +[Try Cobra in Warp today](https://www.warp.dev/cobra)
+ +
+
# Overview diff --git a/vendor/github.com/spf13/cobra/SECURITY.md b/vendor/github.com/spf13/cobra/SECURITY.md new file mode 100644 index 0000000000000..54e60c28c14ce --- /dev/null +++ b/vendor/github.com/spf13/cobra/SECURITY.md @@ -0,0 +1,105 @@ +# Security Policy + +## Reporting a Vulnerability + +The `cobra` maintainers take security issues seriously and +we appreciate your efforts to _**responsibly**_ disclose your findings. +We will make every effort to swiftly respond and address concerns. + +To report a security vulnerability: + +1. **DO NOT** create a public GitHub issue for the vulnerability! +2. **DO NOT** create a public GitHub Pull Request with a fix for the vulnerability! +3. Send an email to `cobra-security@googlegroups.com`. +4. Include the following details in your report: + - Description of the vulnerability + - Steps to reproduce + - Potential impact of the vulnerability (to your downstream project, to the Go ecosystem, etc.) + - Any potential mitigations you've already identified +5. Allow up to 7 days for an initial response. + You should receive an acknowledgment of your report and an estimated timeline for a fix. +6. (Optional) If you have a fix and would like to contribute your patch, please work + directly with the maintainers via `cobra-security@googlegroups.com` to + coordinate pushing the patch to GitHub, cutting a new release, and disclosing the change. + +## Response Process + +When a security vulnerability report is received, the `cobra` maintainers will: + +1. Confirm receipt of the vulnerability report within 7 days. +2. Assess the report to determine if it constitutes a security vulnerability. +3. If confirmed, assign the vulnerability a severity level and create a timeline for addressing it. +4. Develop and test a fix. +5. Patch the vulnerability and make a new GitHub release: the maintainers will coordinate disclosure with the reporter. +6. Create a new GitHub Security Advisory to inform the broader Go ecosystem + +## Disclosure Policy + +The `cobra` maintainers follow a coordinated disclosure process: + +1. Security vulnerabilities will be addressed as quickly as possible. +2. A CVE (Common Vulnerabilities and Exposures) identifier will be requested for significant vulnerabilities + that are within `cobra` itself. +3. Once a fix is ready, the maintainers will: + - Release a new version containing the fix. + - Update the security advisory with details about the vulnerability. + - Credit the reporter (unless they wish to remain anonymous). + - Credit the fixer (unless they wish to remain anonymous, this may be the same as the reporter). + - Announce the vulnerability through appropriate channels + (GitHub Security Advisory, mailing lists, GitHub Releases, etc.) + +## Supported Versions + +Security fixes will typically only be released for the most recent major release. + +## Upstream Security Issues + +`cobra` generally will not accept vulnerability reports that originate in upstream +dependencies. I.e., if there is a problem in Go code that `cobra` depends on, +it is best to engage that project's maintainers and owners. + +This security policy primarily pertains only to `cobra` itself but if you believe you've +identified a problem that originates in an upstream dependency and is being widely +distributed by `cobra`, please follow the disclosure procedure above: the `cobra` +maintainers will work with you to determine the severity and ecosystem impact. 
+ +## Security Updates and CVEs + +Information about known security vulnerabilities and CVEs affecting `cobra` will +be published as GitHub Security Advisories at +https://github.com/spf13/cobra/security/advisories. + +All users are encouraged to watch the repository and upgrade promptly when +security releases are published. + +## `cobra` Security Best Practices for Users + +When using `cobra` in your CLIs, the `cobra` maintainers recommend the following: + +1. Always use the latest version of `cobra`. +2. [Use Go modules](https://go.dev/blog/using-go-modules) for dependency management. +3. Always use the latest possible version of Go. + +## Security Best Practices for Contributors + +When contributing to `cobra`: + +1. Be mindful of security implications when adding new features or modifying existing ones. +2. Be aware of `cobra`'s extremely large reach: it is used in nearly every Go CLI + (like Kubernetes, Docker, Prometheus, etc. etc.) +3. Write tests that explicitly cover edge cases and potential issues. +4. If you discover a security issue while working on `cobra`, please report it + following the process above rather than opening a public pull request or issue that + addresses the vulnerability. +5. Take personal sec-ops seriously and secure your GitHub account: use [two-factor authentication](https://docs.github.com/en/authentication/securing-your-account-with-two-factor-authentication-2fa), + [sign your commits with a GPG or SSH key](https://docs.github.com/en/authentication/managing-commit-signature-verification/about-commit-signature-verification), + etc. + +## Acknowledgments + +The `cobra` maintainers would like to thank all security researchers and +community members who help keep cobra, its users, and the entire Go ecosystem secure through responsible disclosures!! + +--- + +*This security policy is inspired by the [Open Web Application Security Project (OWASP)](https://owasp.org/) guidelines and security best practices.* diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index dbb2c298ba089..78088db69ca25 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -39,7 +39,7 @@ const ( ) // FParseErrWhitelist configures Flag parse errors to be ignored -type FParseErrWhitelist flag.ParseErrorsWhitelist +type FParseErrWhitelist flag.ParseErrorsAllowlist // Group Structure to manage groups for commands type Group struct { @@ -1296,6 +1296,11 @@ Simply type ` + c.DisplayName() + ` help [path to command] for full details.`, c.Printf("Unknown help topic %#q\n", args) CheckErr(c.Root().Usage()) } else { + // Flow the context down to be used in help text + if cmd.ctx == nil { + cmd.ctx = c.ctx + } + cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown cmd.InitDefaultVersionFlag() // make possible 'version' flag to be shown CheckErr(cmd.Help()) @@ -1872,7 +1877,7 @@ func (c *Command) ParseFlags(args []string) error { c.mergePersistentFlags() // do it here after merging all flags and just before parse - c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist) + c.Flags().ParseErrorsAllowlist = flag.ParseErrorsAllowlist(c.FParseErrWhitelist) err := c.Flags().Parse(args) // Print warnings if they occurred (e.g. deprecated flag messages). 
@@ -2020,7 +2025,7 @@ func defaultUsageFunc(w io.Writer, in interface{}) error { fmt.Fprint(w, trimRightSpace(c.InheritedFlags().FlagUsages())) } if c.HasHelpSubCommands() { - fmt.Fprintf(w, "\n\nAdditional help topcis:") + fmt.Fprintf(w, "\n\nAdditional help topics:") for _, subcmd := range c.Commands() { if subcmd.IsAdditionalHelpTopicCommand() { fmt.Fprintf(w, "\n %s %s", rpad(subcmd.CommandPath(), subcmd.CommandPathPadding()), subcmd.Short) diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go index a1752f7631758..d3607c2d2fefd 100644 --- a/vendor/github.com/spf13/cobra/completions.go +++ b/vendor/github.com/spf13/cobra/completions.go @@ -115,6 +115,13 @@ type CompletionOptions struct { DisableDescriptions bool // HiddenDefaultCmd makes the default 'completion' command hidden HiddenDefaultCmd bool + // DefaultShellCompDirective sets the ShellCompDirective that is returned + // if no special directive can be determined + DefaultShellCompDirective *ShellCompDirective +} + +func (receiver *CompletionOptions) SetDefaultShellCompDirective(directive ShellCompDirective) { + receiver.DefaultShellCompDirective = &directive } // Completion is a string that can be used for completions @@ -375,7 +382,7 @@ func (c *Command) getCompletions(args []string) (*Command, []Completion, ShellCo // Error while attempting to parse flags if flagErr != nil { // If error type is flagCompError and we don't want flagCompletion we should ignore the error - if _, ok := flagErr.(*flagCompError); !(ok && !flagCompletion) { + if _, ok := flagErr.(*flagCompError); !ok || flagCompletion { return finalCmd, []Completion{}, ShellCompDirectiveDefault, flagErr } } @@ -480,6 +487,14 @@ func (c *Command) getCompletions(args []string) (*Command, []Completion, ShellCo } } else { directive = ShellCompDirectiveDefault + // check current and parent commands for a custom DefaultShellCompDirective + for cmd := finalCmd; cmd != nil; cmd = cmd.parent { + if cmd.CompletionOptions.DefaultShellCompDirective != nil { + directive = *cmd.CompletionOptions.DefaultShellCompDirective + break + } + } + if flag == nil { foundLocalNonPersistentFlag := false // If TraverseChildren is true on the root command we don't check for @@ -773,7 +788,7 @@ See each sub-command's help for details on how to use the generated script. // shell completion for it (prog __complete completion '') subCmd, cmdArgs, err := c.Find(args) if err != nil || subCmd.Name() != compCmdName && - !(subCmd.Name() == ShellCompRequestCmd && len(cmdArgs) > 1 && cmdArgs[0] == compCmdName) { + (subCmd.Name() != ShellCompRequestCmd || len(cmdArgs) <= 1 || cmdArgs[0] != compCmdName) { // The completion command is not being called or being completed so we remove it. 
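completions.go now lets a command (or any of its parents) override the fallback directive via CompletionOptions.DefaultShellCompDirective and the SetDefaultShellCompDirective helper. A minimal sketch, assuming a plain root command, that turns off the default file-name completion:

```go
package main

import "github.com/spf13/cobra"

func main() {
	root := &cobra.Command{
		Use: "app",
		Run: func(cmd *cobra.Command, args []string) {},
	}

	// Completions that would otherwise fall back to ShellCompDirectiveDefault
	// stop offering file names for this command tree.
	root.CompletionOptions.SetDefaultShellCompDirective(cobra.ShellCompDirectiveNoFileComp)

	if err := root.Execute(); err != nil {
		panic(err)
	}
}
```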
c.RemoveCommand(completionCmd) return diff --git a/vendor/github.com/spf13/cobra/doc/man_docs.go b/vendor/github.com/spf13/cobra/doc/man_docs.go index 2138f24882b35..560bc20c75da7 100644 --- a/vendor/github.com/spf13/cobra/doc/man_docs.go +++ b/vendor/github.com/spf13/cobra/doc/man_docs.go @@ -212,7 +212,7 @@ func genMan(cmd *cobra.Command, header *GenManHeader) []byte { manPrintOptions(buf, cmd) if len(cmd.Example) > 0 { buf.WriteString("# EXAMPLE\n") - buf.WriteString(fmt.Sprintf("```\n%s\n```\n", cmd.Example)) + fmt.Fprintf(buf, "```\n%s\n```\n", cmd.Example) } if hasSeeAlso(cmd) { buf.WriteString("# SEE ALSO\n") @@ -240,7 +240,7 @@ func genMan(cmd *cobra.Command, header *GenManHeader) []byte { buf.WriteString(strings.Join(seealsos, ", ") + "\n") } if !cmd.DisableAutoGenTag { - buf.WriteString(fmt.Sprintf("# HISTORY\n%s Auto generated by spf13/cobra\n", header.Date.Format("2-Jan-2006"))) + fmt.Fprintf(buf, "# HISTORY\n%s Auto generated by spf13/cobra\n", header.Date.Format("2-Jan-2006")) } return buf.Bytes() } diff --git a/vendor/github.com/spf13/cobra/doc/md_docs.go b/vendor/github.com/spf13/cobra/doc/md_docs.go index 12592223ba47b..6eae7ccfb6d90 100644 --- a/vendor/github.com/spf13/cobra/doc/md_docs.go +++ b/vendor/github.com/spf13/cobra/doc/md_docs.go @@ -69,12 +69,12 @@ func GenMarkdownCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) } if cmd.Runnable() { - buf.WriteString(fmt.Sprintf("```\n%s\n```\n\n", cmd.UseLine())) + fmt.Fprintf(buf, "```\n%s\n```\n\n", cmd.UseLine()) } if len(cmd.Example) > 0 { buf.WriteString("### Examples\n\n") - buf.WriteString(fmt.Sprintf("```\n%s\n```\n\n", cmd.Example)) + fmt.Fprintf(buf, "```\n%s\n```\n\n", cmd.Example) } if err := printOptions(buf, cmd, name); err != nil { @@ -87,7 +87,7 @@ func GenMarkdownCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) pname := parent.CommandPath() link := pname + markdownExtension link = strings.ReplaceAll(link, " ", "_") - buf.WriteString(fmt.Sprintf("* [%s](%s)\t - %s\n", pname, linkHandler(link), parent.Short)) + fmt.Fprintf(buf, "* [%s](%s)\t - %s\n", pname, linkHandler(link), parent.Short) cmd.VisitParents(func(c *cobra.Command) { if c.DisableAutoGenTag { cmd.DisableAutoGenTag = c.DisableAutoGenTag @@ -105,7 +105,7 @@ func GenMarkdownCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) cname := name + " " + child.Name() link := cname + markdownExtension link = strings.ReplaceAll(link, " ", "_") - buf.WriteString(fmt.Sprintf("* [%s](%s)\t - %s\n", cname, linkHandler(link), child.Short)) + fmt.Fprintf(buf, "* [%s](%s)\t - %s\n", cname, linkHandler(link), child.Short) } buf.WriteString("\n") } diff --git a/vendor/github.com/spf13/cobra/doc/rest_docs.go b/vendor/github.com/spf13/cobra/doc/rest_docs.go index c33acc2baa91a..4901ca9801c9d 100644 --- a/vendor/github.com/spf13/cobra/doc/rest_docs.go +++ b/vendor/github.com/spf13/cobra/doc/rest_docs.go @@ -82,13 +82,13 @@ func GenReSTCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string, str buf.WriteString("\n" + long + "\n\n") if cmd.Runnable() { - buf.WriteString(fmt.Sprintf("::\n\n %s\n\n", cmd.UseLine())) + fmt.Fprintf(buf, "::\n\n %s\n\n", cmd.UseLine()) } if len(cmd.Example) > 0 { buf.WriteString("Examples\n") buf.WriteString("~~~~~~~~\n\n") - buf.WriteString(fmt.Sprintf("::\n\n%s\n\n", indentString(cmd.Example, " "))) + fmt.Fprintf(buf, "::\n\n%s\n\n", indentString(cmd.Example, " ")) } if err := printOptionsReST(buf, cmd, name); err != nil { @@ -101,7 +101,7 @@ func GenReSTCustom(cmd *cobra.Command, w 
io.Writer, linkHandler func(string, str parent := cmd.Parent() pname := parent.CommandPath() ref = strings.ReplaceAll(pname, " ", "_") - buf.WriteString(fmt.Sprintf("* %s \t - %s\n", linkHandler(pname, ref), parent.Short)) + fmt.Fprintf(buf, "* %s \t - %s\n", linkHandler(pname, ref), parent.Short) cmd.VisitParents(func(c *cobra.Command) { if c.DisableAutoGenTag { cmd.DisableAutoGenTag = c.DisableAutoGenTag @@ -118,7 +118,7 @@ func GenReSTCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string, str } cname := name + " " + child.Name() ref = strings.ReplaceAll(cname, " ", "_") - buf.WriteString(fmt.Sprintf("* %s \t - %s\n", linkHandler(cname, ref), child.Short)) + fmt.Fprintf(buf, "* %s \t - %s\n", linkHandler(cname, ref), child.Short) } buf.WriteString("\n") } diff --git a/vendor/github.com/spf13/cobra/doc/yaml_docs.go b/vendor/github.com/spf13/cobra/doc/yaml_docs.go index 2b26d6ec0f3ef..3719717037170 100644 --- a/vendor/github.com/spf13/cobra/doc/yaml_docs.go +++ b/vendor/github.com/spf13/cobra/doc/yaml_docs.go @@ -153,7 +153,7 @@ func genFlagResult(flags *pflag.FlagSet) []cmdOption { // Todo, when we mark a shorthand is deprecated, but specify an empty message. // The flag.ShorthandDeprecated is empty as the shorthand is deprecated. // Using len(flag.ShorthandDeprecated) > 0 can't handle this, others are ok. - if !(len(flag.ShorthandDeprecated) > 0) && len(flag.Shorthand) > 0 { + if len(flag.ShorthandDeprecated) == 0 && len(flag.Shorthand) > 0 { opt := cmdOption{ flag.Name, flag.Shorthand, diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go index d4dfbc5ea0b3e..2fd3c57597a23 100644 --- a/vendor/github.com/spf13/pflag/flag.go +++ b/vendor/github.com/spf13/pflag/flag.go @@ -137,12 +137,17 @@ const ( PanicOnError ) -// ParseErrorsWhitelist defines the parsing errors that can be ignored -type ParseErrorsWhitelist struct { +// ParseErrorsAllowlist defines the parsing errors that can be ignored +type ParseErrorsAllowlist struct { // UnknownFlags will ignore unknown flags errors and continue parsing rest of the flags UnknownFlags bool } +// ParseErrorsWhitelist defines the parsing errors that can be ignored. +// +// Deprecated: use [ParseErrorsAllowlist] instead. This type will be removed in a future release. +type ParseErrorsWhitelist = ParseErrorsAllowlist + // NormalizedName is a flag name that has been normalized according to rules // for the FlagSet (e.g. making '-' and '_' equivalent). type NormalizedName string @@ -158,8 +163,13 @@ type FlagSet struct { // help/usage messages. SortFlags bool - // ParseErrorsWhitelist is used to configure a whitelist of errors - ParseErrorsWhitelist ParseErrorsWhitelist + // ParseErrorsAllowlist is used to configure an allowlist of errors + ParseErrorsAllowlist ParseErrorsAllowlist + + // ParseErrorsAllowlist is used to configure an allowlist of errors. + // + // Deprecated: use [FlagSet.ParseErrorsAllowlist] instead. This field will be removed in a future release. + ParseErrorsWhitelist ParseErrorsAllowlist name string parsed bool @@ -928,7 +938,6 @@ func VarP(value Value, name, shorthand, usage string) { // returns the error. 
func (f *FlagSet) fail(err error) error { if f.errorHandling != ContinueOnError { - fmt.Fprintln(f.Output(), err) f.usage() } return err @@ -986,6 +995,8 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin f.usage() return a, ErrHelp case f.ParseErrorsWhitelist.UnknownFlags: + fallthrough + case f.ParseErrorsAllowlist.UnknownFlags: // --unknown=unknownval arg ... // we do not want to lose arg in this case if len(split) >= 2 { @@ -1044,6 +1055,8 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse err = ErrHelp return case f.ParseErrorsWhitelist.UnknownFlags: + fallthrough + case f.ParseErrorsAllowlist.UnknownFlags: // '-f=arg arg ...' // we do not want to lose arg in this case if len(shorthands) > 2 && shorthands[1] == '=' { @@ -1158,12 +1171,12 @@ func (f *FlagSet) Parse(arguments []string) error { } f.parsed = true + f.args = make([]string, 0, len(arguments)) + if len(arguments) == 0 { return nil } - f.args = make([]string, 0, len(arguments)) - set := func(flag *Flag, value string) error { return f.Set(flag.Name, value) } @@ -1174,7 +1187,10 @@ func (f *FlagSet) Parse(arguments []string) error { case ContinueOnError: return err case ExitOnError: - fmt.Println(err) + if err == ErrHelp { + os.Exit(0) + } + fmt.Fprintln(f.Output(), err) os.Exit(2) case PanicOnError: panic(err) @@ -1200,6 +1216,10 @@ func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) case ContinueOnError: return err case ExitOnError: + if err == ErrHelp { + os.Exit(0) + } + fmt.Fprintln(f.Output(), err) os.Exit(2) case PanicOnError: panic(err) diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go index f563907e28ffc..e62eab53810c3 100644 --- a/vendor/github.com/spf13/pflag/golangflag.go +++ b/vendor/github.com/spf13/pflag/golangflag.go @@ -8,6 +8,7 @@ import ( goflag "flag" "reflect" "strings" + "time" ) // go test flags prefixes @@ -113,6 +114,38 @@ func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) { f.addedGoFlagSets = append(f.addedGoFlagSets, newSet) } +// CopyToGoFlagSet will add all current flags to the given Go flag set. +// Deprecation remarks get copied into the usage description. +// Whenever possible, a flag gets added for which Go flags shows +// a proper type in the help message. +func (f *FlagSet) CopyToGoFlagSet(newSet *goflag.FlagSet) { + f.VisitAll(func(flag *Flag) { + usage := flag.Usage + if flag.Deprecated != "" { + usage += " (DEPRECATED: " + flag.Deprecated + ")" + } + + switch value := flag.Value.(type) { + case *stringValue: + newSet.StringVar((*string)(value), flag.Name, flag.DefValue, usage) + case *intValue: + newSet.IntVar((*int)(value), flag.Name, *(*int)(value), usage) + case *int64Value: + newSet.Int64Var((*int64)(value), flag.Name, *(*int64)(value), usage) + case *uintValue: + newSet.UintVar((*uint)(value), flag.Name, *(*uint)(value), usage) + case *uint64Value: + newSet.Uint64Var((*uint64)(value), flag.Name, *(*uint64)(value), usage) + case *durationValue: + newSet.DurationVar((*time.Duration)(value), flag.Name, *(*time.Duration)(value), usage) + case *float64Value: + newSet.Float64Var((*float64)(value), flag.Name, *(*float64)(value), usage) + default: + newSet.Var(flag.Value, flag.Name, usage) + } + }) +} + // ParseSkippedFlags explicitly Parses go test flags (i.e. the one starting with '-test.') with goflag.Parse(), // since by default those are skipped by pflag.Parse(). 
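// Editor's note — illustrative sketch, not part of the vendored diff: the new
// CopyToGoFlagSet above mirrors every pflag definition into a standard-library
// flag.FlagSet, copying defaults and appending deprecation remarks to the usage
// text so that Go's flag package can print proper types in its help output.
// A hypothetical bridge between the two flag packages:
package main

import (
	goflag "flag"
	"time"

	"github.com/spf13/pflag"
)

func main() {
	pf := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	pf.Duration("timeout", 30*time.Second, "request timeout")
	pf.Int("retries", 3, "number of retries")

	gf := goflag.NewFlagSet("demo", goflag.ContinueOnError)
	pf.CopyToGoFlagSet(gf) // typed copies: timeout arrives as a duration, retries as an int
	gf.PrintDefaults()     // help output now lists the copied defaults and their types
}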
// Typical usage example: `ParseGoTestFlags(os.Args[1:], goflag.CommandLine)` @@ -125,3 +158,4 @@ func ParseSkippedFlags(osArgs []string, goFlagSet *goflag.FlagSet) error { } return goFlagSet.Parse(skippedFlags) } + diff --git a/vendor/github.com/spf13/pflag/string_to_string.go b/vendor/github.com/spf13/pflag/string_to_string.go index 890a01afc0300..1d1e3bf91a35e 100644 --- a/vendor/github.com/spf13/pflag/string_to_string.go +++ b/vendor/github.com/spf13/pflag/string_to_string.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/csv" "fmt" + "sort" "strings" ) @@ -62,8 +63,15 @@ func (s *stringToStringValue) Type() string { } func (s *stringToStringValue) String() string { + keys := make([]string, 0, len(*s.value)) + for k := range *s.value { + keys = append(keys, k) + } + sort.Strings(keys) + records := make([]string, 0, len(*s.value)>>1) - for k, v := range *s.value { + for _, k := range keys { + v := (*s.value)[k] records = append(records, k+"="+v) } diff --git a/vendor/github.com/spf13/pflag/time.go b/vendor/github.com/spf13/pflag/time.go index dc024807e0c75..3dee424791a88 100644 --- a/vendor/github.com/spf13/pflag/time.go +++ b/vendor/github.com/spf13/pflag/time.go @@ -48,7 +48,13 @@ func (d *timeValue) Type() string { return "time" } -func (d *timeValue) String() string { return d.Time.Format(time.RFC3339Nano) } +func (d *timeValue) String() string { + if d.Time.IsZero() { + return "" + } else { + return d.Time.Format(time.RFC3339Nano) + } +} // GetTime return the time value of a flag with the given name func (f *FlagSet) GetTime(name string) (time.Time, error) { diff --git a/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go b/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go deleted file mode 100644 index 73687de748ada..0000000000000 --- a/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.5 - -package plan9 - -import "syscall" - -func fixwd() { - syscall.Fixwd() -} - -func Getwd() (wd string, err error) { - return syscall.Getwd() -} - -func Chdir(path string) error { - return syscall.Chdir(path) -} diff --git a/vendor/golang.org/x/sys/plan9/pwd_plan9.go b/vendor/golang.org/x/sys/plan9/pwd_plan9.go index fb945821847a8..7a76489db161c 100644 --- a/vendor/golang.org/x/sys/plan9/pwd_plan9.go +++ b/vendor/golang.org/x/sys/plan9/pwd_plan9.go @@ -2,22 +2,18 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !go1.5 - package plan9 +import "syscall" + func fixwd() { + syscall.Fixwd() } func Getwd() (wd string, err error) { - fd, err := open(".", O_RDONLY) - if err != nil { - return "", err - } - defer Close(fd) - return Fd2path(fd) + return syscall.Getwd() } func Chdir(path string) error { - return chdir(path) + return syscall.Chdir(path) } diff --git a/vendor/golang.org/x/sys/unix/affinity_linux.go b/vendor/golang.org/x/sys/unix/affinity_linux.go index 6e5c81acd0491..3c7a6d6e2f1d2 100644 --- a/vendor/golang.org/x/sys/unix/affinity_linux.go +++ b/vendor/golang.org/x/sys/unix/affinity_linux.go @@ -38,9 +38,7 @@ func SchedSetaffinity(pid int, set *CPUSet) error { // Zero clears the set s, so that it contains no CPUs. 
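// Editor's note — illustrative sketch, not part of the vendored diff: the
// string_to_string change above sorts map keys before rendering, so String()
// for a map-valued flag (and therefore its printed default) is deterministic
// rather than following Go's randomized map iteration order. For example:
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.StringToString("labels", map[string]string{"b": "2", "a": "1", "c": "3"}, "key=value pairs")

	// With sorted keys the rendered value is stable across runs, which keeps
	// generated --help text and golden-file tests reproducible.
	fmt.Println(fs.Lookup("labels").Value.String())
}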
func (s *CPUSet) Zero() { - for i := range s { - s[i] = 0 - } + clear(s[:]) } func cpuBitsIndex(cpu int) int { diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index abc3955477c7d..18a3d9bdabc1c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -629,7 +629,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Kill(pid int, signum syscall.Signal) (err error) //sys Lchown(path string, uid int, gid int) (err error) //sys Link(path string, link string) (err error) -//sys Listen(s int, backlog int) (err error) = libsocket.__xnet_llisten +//sys Listen(s int, backlog int) (err error) = libsocket.__xnet_listen //sys Lstat(path string, stat *Stat_t) (err error) //sys Madvise(b []byte, advice int) (err error) //sys Mkdir(path string, mode uint32) (err error) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index c6545413c45b4..b4609c20c24df 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -72,7 +72,7 @@ import ( //go:cgo_import_dynamic libc_kill kill "libc.so" //go:cgo_import_dynamic libc_lchown lchown "libc.so" //go:cgo_import_dynamic libc_link link "libc.so" -//go:cgo_import_dynamic libc___xnet_llisten __xnet_llisten "libsocket.so" +//go:cgo_import_dynamic libc___xnet_listen __xnet_listen "libsocket.so" //go:cgo_import_dynamic libc_lstat lstat "libc.so" //go:cgo_import_dynamic libc_madvise madvise "libc.so" //go:cgo_import_dynamic libc_mkdir mkdir "libc.so" @@ -221,7 +221,7 @@ import ( //go:linkname procKill libc_kill //go:linkname procLchown libc_lchown //go:linkname procLink libc_link -//go:linkname proc__xnet_llisten libc___xnet_llisten +//go:linkname proc__xnet_listen libc___xnet_listen //go:linkname procLstat libc_lstat //go:linkname procMadvise libc_madvise //go:linkname procMkdir libc_mkdir @@ -371,7 +371,7 @@ var ( procKill, procLchown, procLink, - proc__xnet_llisten, + proc__xnet_listen, procLstat, procMadvise, procMkdir, @@ -1178,7 +1178,7 @@ func Link(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_llisten)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0) + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_listen)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index cd236443f6453..944e75a11cb1d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -632,6 +632,8 @@ const ( IFA_FLAGS = 0x8 IFA_RT_PRIORITY = 0x9 IFA_TARGET_NETNSID = 0xa + IFAL_LABEL = 0x2 + IFAL_ADDRESS = 0x1 RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -689,6 +691,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfAddrlblmsg = 0xc SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 @@ -740,6 +743,15 @@ type IfAddrmsg struct { Index uint32 } +type IfAddrlblmsg struct { + Family uint8 + _ uint8 + Prefixlen uint8 + Flags uint8 + Index uint32 + Seq uint32 +} + type IfaCacheinfo struct { Prefered uint32 Valid uint32 @@ -3052,6 +3064,23 @@ const ( ) const ( + TCA_UNSPEC = 0x0 
+ TCA_KIND = 0x1 + TCA_OPTIONS = 0x2 + TCA_STATS = 0x3 + TCA_XSTATS = 0x4 + TCA_RATE = 0x5 + TCA_FCNT = 0x6 + TCA_STATS2 = 0x7 + TCA_STAB = 0x8 + TCA_PAD = 0x9 + TCA_DUMP_INVISIBLE = 0xa + TCA_CHAIN = 0xb + TCA_HW_OFFLOAD = 0xc + TCA_INGRESS_BLOCK = 0xd + TCA_EGRESS_BLOCK = 0xe + TCA_DUMP_FLAGS = 0xf + TCA_EXT_WARN_MSG = 0x10 RTNLGRP_NONE = 0x0 RTNLGRP_LINK = 0x1 RTNLGRP_NOTIFY = 0x2 @@ -3086,6 +3115,18 @@ const ( RTNLGRP_IPV6_MROUTE_R = 0x1f RTNLGRP_NEXTHOP = 0x20 RTNLGRP_BRVLAN = 0x21 + RTNLGRP_MCTP_IFADDR = 0x22 + RTNLGRP_TUNNEL = 0x23 + RTNLGRP_STATS = 0x24 + RTNLGRP_IPV4_MCADDR = 0x25 + RTNLGRP_IPV6_MCADDR = 0x26 + RTNLGRP_IPV6_ACADDR = 0x27 + TCA_ROOT_UNSPEC = 0x0 + TCA_ROOT_TAB = 0x1 + TCA_ROOT_FLAGS = 0x2 + TCA_ROOT_COUNT = 0x3 + TCA_ROOT_TIME_DELTA = 0x4 + TCA_ROOT_EXT_WARN_MSG = 0x5 ) type CapUserHeader struct { diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 958bcf47a3864..993a2297dbe1a 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -1976,6 +1976,12 @@ const ( SYMBOLIC_LINK_FLAG_DIRECTORY = 0x1 ) +// FILE_ZERO_DATA_INFORMATION from winioctl.h +type FileZeroDataInformation struct { + FileOffset int64 + BeyondFinalZero int64 +} + const ( ComputerNameNetBIOS = 0 ComputerNameDnsHostname = 1 diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index a58bc48b8edee..641a5f4b775aa 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -546,25 +546,25 @@ var ( ) func cm_Get_DevNode_Status(status *uint32, problemNumber *uint32, devInst DEVINST, flags uint32) (ret CONFIGRET) { - r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_Status.Addr(), 4, uintptr(unsafe.Pointer(status)), uintptr(unsafe.Pointer(problemNumber)), uintptr(devInst), uintptr(flags), 0, 0) + r0, _, _ := syscall.SyscallN(procCM_Get_DevNode_Status.Addr(), uintptr(unsafe.Pointer(status)), uintptr(unsafe.Pointer(problemNumber)), uintptr(devInst), uintptr(flags)) ret = CONFIGRET(r0) return } func cm_Get_Device_Interface_List(interfaceClass *GUID, deviceID *uint16, buffer *uint16, bufferLen uint32, flags uint32) (ret CONFIGRET) { - r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_ListW.Addr(), 5, uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(flags), 0) + r0, _, _ := syscall.SyscallN(procCM_Get_Device_Interface_ListW.Addr(), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(flags)) ret = CONFIGRET(r0) return } func cm_Get_Device_Interface_List_Size(len *uint32, interfaceClass *GUID, deviceID *uint16, flags uint32) (ret CONFIGRET) { - r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_List_SizeW.Addr(), 4, uintptr(unsafe.Pointer(len)), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(flags), 0, 0) + r0, _, _ := syscall.SyscallN(procCM_Get_Device_Interface_List_SizeW.Addr(), uintptr(unsafe.Pointer(len)), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(flags)) ret = CONFIGRET(r0) return } func cm_MapCrToWin32Err(configRet CONFIGRET, defaultWin32Error Errno) (ret Errno) { - r0, _, _ := syscall.Syscall(procCM_MapCrToWin32Err.Addr(), 2, uintptr(configRet), uintptr(defaultWin32Error), 0) + r0, _, _ := 
syscall.SyscallN(procCM_MapCrToWin32Err.Addr(), uintptr(configRet), uintptr(defaultWin32Error)) ret = Errno(r0) return } @@ -574,7 +574,7 @@ func AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, if resetToDefault { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procAdjustTokenGroups.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + r1, _, e1 := syscall.SyscallN(procAdjustTokenGroups.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) if r1 == 0 { err = errnoErr(e1) } @@ -586,7 +586,7 @@ func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tok if disableAllPrivileges { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + r1, _, e1 := syscall.SyscallN(procAdjustTokenPrivileges.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) if r1 == 0 { err = errnoErr(e1) } @@ -594,7 +594,7 @@ func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tok } func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) { - r1, _, e1 := syscall.Syscall12(procAllocateAndInitializeSid.Addr(), 11, uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)), 0) + r1, _, e1 := syscall.SyscallN(procAllocateAndInitializeSid.Addr(), uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid))) if r1 == 0 { err = errnoErr(e1) } @@ -602,7 +602,7 @@ func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, s } func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procBuildSecurityDescriptorW.Addr(), 9, uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor))) + r0, _, _ := syscall.SyscallN(procBuildSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), 
uintptr(unsafe.Pointer(newSecurityDescriptor))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -610,7 +610,7 @@ func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries } func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) { - r1, _, e1 := syscall.Syscall(procChangeServiceConfig2W.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info))) + r1, _, e1 := syscall.SyscallN(procChangeServiceConfig2W.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info))) if r1 == 0 { err = errnoErr(e1) } @@ -618,7 +618,7 @@ func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err err } func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) { - r1, _, e1 := syscall.Syscall12(procChangeServiceConfigW.Addr(), 11, uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)), 0) + r1, _, e1 := syscall.SyscallN(procChangeServiceConfigW.Addr(), uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName))) if r1 == 0 { err = errnoErr(e1) } @@ -626,7 +626,7 @@ func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, e } func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) { - r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) + r1, _, e1 := syscall.SyscallN(procCheckTokenMembership.Addr(), uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) if r1 == 0 { err = errnoErr(e1) } @@ -634,7 +634,7 @@ func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) ( } func CloseServiceHandle(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCloseServiceHandle.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procCloseServiceHandle.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -642,7 +642,7 @@ func CloseServiceHandle(handle Handle) (err error) { } func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) + r1, _, e1 := syscall.SyscallN(procControlService.Addr(), uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) if r1 == 0 { err = errnoErr(e1) } @@ -650,7 +650,7 @@ func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err } func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) { - r1, _, e1 := 
syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen)), 0) + r1, _, e1 := syscall.SyscallN(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen))) if r1 == 0 { err = errnoErr(e1) } @@ -658,7 +658,7 @@ func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR } func ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)), 0) + r1, _, e1 := syscall.SyscallN(procConvertSidToStringSidW.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid))) if r1 == 0 { err = errnoErr(e1) } @@ -675,7 +675,7 @@ func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision ui } func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size))) if r1 == 0 { err = errnoErr(e1) } @@ -683,7 +683,7 @@ func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision } func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) { - r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)), 0) + r1, _, e1 := syscall.SyscallN(procConvertStringSidToSidW.Addr(), uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid))) if r1 == 0 { err = errnoErr(e1) } @@ -691,7 +691,7 @@ func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) { } func CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) { - r1, _, e1 := syscall.Syscall(procCopySid.Addr(), 3, uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid))) + r1, _, e1 := syscall.SyscallN(procCopySid.Addr(), uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid))) if r1 == 0 { err = errnoErr(e1) } @@ -703,7 +703,7 @@ func CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, proc if inheritHandles { _p0 = 1 } - r1, _, e1 := syscall.Syscall12(procCreateProcessAsUserW.Addr(), 11, uintptr(token), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0) + r1, _, e1 := syscall.SyscallN(procCreateProcessAsUserW.Addr(), uintptr(token), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), 
uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo))) if r1 == 0 { err = errnoErr(e1) } @@ -711,7 +711,7 @@ func CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, proc } func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall15(procCreateServiceW.Addr(), 13, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateServiceW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -720,7 +720,7 @@ func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access } func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCreateWellKnownSid.Addr(), 4, uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCreateWellKnownSid.Addr(), uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid))) if r1 == 0 { err = errnoErr(e1) } @@ -728,7 +728,7 @@ func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, s } func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCryptAcquireContextW.Addr(), 5, uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCryptAcquireContextW.Addr(), uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -736,7 +736,7 @@ func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16 } func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { - r1, _, e1 := syscall.Syscall(procCryptGenRandom.Addr(), 3, uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) + r1, _, e1 := syscall.SyscallN(procCryptGenRandom.Addr(), uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) if r1 == 0 { err = errnoErr(e1) } @@ -744,7 +744,7 @@ func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { } func CryptReleaseContext(provhandle Handle, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCryptReleaseContext.Addr(), 2, uintptr(provhandle), uintptr(flags), 0) + r1, _, e1 := 
syscall.SyscallN(procCryptReleaseContext.Addr(), uintptr(provhandle), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -752,7 +752,7 @@ func CryptReleaseContext(provhandle Handle, flags uint32) (err error) { } func DeleteService(service Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteService.Addr(), 1, uintptr(service), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeleteService.Addr(), uintptr(service)) if r1 == 0 { err = errnoErr(e1) } @@ -760,7 +760,7 @@ func DeleteService(service Handle) (err error) { } func DeregisterEventSource(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDeregisterEventSource.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeregisterEventSource.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -768,7 +768,7 @@ func DeregisterEventSource(handle Handle) (err error) { } func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) { - r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken))) + r1, _, e1 := syscall.SyscallN(procDuplicateTokenEx.Addr(), uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken))) if r1 == 0 { err = errnoErr(e1) } @@ -776,7 +776,7 @@ func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes } func EnumDependentServices(service Handle, activityState uint32, services *ENUM_SERVICE_STATUS, buffSize uint32, bytesNeeded *uint32, servicesReturned *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procEnumDependentServicesW.Addr(), 6, uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned))) + r1, _, e1 := syscall.SyscallN(procEnumDependentServicesW.Addr(), uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned))) if r1 == 0 { err = errnoErr(e1) } @@ -784,7 +784,7 @@ func EnumDependentServices(service Handle, activityState uint32, services *ENUM_ } func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) { - r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0) + r1, _, e1 := syscall.SyscallN(procEnumServicesStatusExW.Addr(), uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName))) if r1 == 0 { err = errnoErr(e1) } @@ -792,13 +792,13 @@ func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serv } func 
EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) { - r0, _, _ := syscall.Syscall(procEqualSid.Addr(), 2, uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)), 0) + r0, _, _ := syscall.SyscallN(procEqualSid.Addr(), uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2))) isEqual = r0 != 0 return } func FreeSid(sid *SID) (err error) { - r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r1, _, e1 := syscall.SyscallN(procFreeSid.Addr(), uintptr(unsafe.Pointer(sid))) if r1 != 0 { err = errnoErr(e1) } @@ -806,7 +806,7 @@ func FreeSid(sid *SID) (err error) { } func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) { - r1, _, e1 := syscall.Syscall(procGetAce.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) + r1, _, e1 := syscall.SyscallN(procGetAce.Addr(), uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) if r1 == 0 { err = errnoErr(e1) } @@ -814,7 +814,7 @@ func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) { } func GetLengthSid(sid *SID) (len uint32) { - r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetLengthSid.Addr(), uintptr(unsafe.Pointer(sid))) len = uint32(r0) return } @@ -829,7 +829,7 @@ func getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, security } func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procGetNamedSecurityInfoW.Addr(), 8, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + r0, _, _ := syscall.SyscallN(procGetNamedSecurityInfoW.Addr(), uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -837,7 +837,7 @@ func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securi } func getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision))) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision))) if r1 == 0 { err = errnoErr(e1) } @@ -853,7 +853,7 @@ func getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl if *daclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorDacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1))) *daclPresent = _p0 != 0 *daclDefaulted = _p1 != 0 if r1 == 0 { @@ -867,7 +867,7 @@ func 
getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefau if *groupDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0))) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorGroup.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0))) *groupDefaulted = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -876,7 +876,7 @@ func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefau } func getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetSecurityDescriptorLength.Addr(), uintptr(unsafe.Pointer(sd))) len = uint32(r0) return } @@ -886,7 +886,7 @@ func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefau if *ownerDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0))) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorOwner.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0))) *ownerDefaulted = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -895,7 +895,7 @@ func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefau } func getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) + r0, _, _ := syscall.SyscallN(procGetSecurityDescriptorRMControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -911,7 +911,7 @@ func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl if *saclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorSacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1))) *saclPresent = _p0 != 0 *saclDefaulted = _p1 != 0 if r1 == 0 { @@ -921,7 +921,7 @@ func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl } func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + r0, _, _ := syscall.SyscallN(procGetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -929,25 +929,25 @@ func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformati } func 
getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) { - r0, _, _ := syscall.Syscall(procGetSidIdentifierAuthority.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetSidIdentifierAuthority.Addr(), uintptr(unsafe.Pointer(sid))) authority = (*SidIdentifierAuthority)(unsafe.Pointer(r0)) return } func getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) { - r0, _, _ := syscall.Syscall(procGetSidSubAuthority.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(index), 0) + r0, _, _ := syscall.SyscallN(procGetSidSubAuthority.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(index)) subAuthority = (*uint32)(unsafe.Pointer(r0)) return } func getSidSubAuthorityCount(sid *SID) (count *uint8) { - r0, _, _ := syscall.Syscall(procGetSidSubAuthorityCount.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetSidSubAuthorityCount.Addr(), uintptr(unsafe.Pointer(sid))) count = (*uint8)(unsafe.Pointer(r0)) return } func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0) + r1, _, e1 := syscall.SyscallN(procGetTokenInformation.Addr(), uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen))) if r1 == 0 { err = errnoErr(e1) } @@ -955,7 +955,7 @@ func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint } func ImpersonateSelf(impersonationlevel uint32) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0) + r1, _, e1 := syscall.SyscallN(procImpersonateSelf.Addr(), uintptr(impersonationlevel)) if r1 == 0 { err = errnoErr(e1) } @@ -963,7 +963,7 @@ func ImpersonateSelf(impersonationlevel uint32) (err error) { } func initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) { - r1, _, e1 := syscall.Syscall(procInitializeSecurityDescriptor.Addr(), 2, uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision), 0) + r1, _, e1 := syscall.SyscallN(procInitializeSecurityDescriptor.Addr(), uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision)) if r1 == 0 { err = errnoErr(e1) } @@ -979,7 +979,7 @@ func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint if rebootAfterShutdown { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procInitiateSystemShutdownExW.Addr(), 6, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason)) + r1, _, e1 := syscall.SyscallN(procInitiateSystemShutdownExW.Addr(), uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason)) if r1 == 0 { err = errnoErr(e1) } @@ -987,7 +987,7 @@ func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint } func isTokenRestricted(tokenHandle Token) (ret bool, err error) { - r0, _, e1 := syscall.Syscall(procIsTokenRestricted.Addr(), 1, uintptr(tokenHandle), 0, 0) + r0, _, e1 := syscall.SyscallN(procIsTokenRestricted.Addr(), uintptr(tokenHandle)) ret = r0 != 0 if !ret { err = errnoErr(e1) @@ -996,25 +996,25 @@ func isTokenRestricted(tokenHandle Token) (ret bool, err error) { } func isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) { - r0, _, _ := 
syscall.Syscall(procIsValidSecurityDescriptor.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + r0, _, _ := syscall.SyscallN(procIsValidSecurityDescriptor.Addr(), uintptr(unsafe.Pointer(sd))) isValid = r0 != 0 return } func isValidSid(sid *SID) (isValid bool) { - r0, _, _ := syscall.Syscall(procIsValidSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procIsValidSid.Addr(), uintptr(unsafe.Pointer(sid))) isValid = r0 != 0 return } func isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) { - r0, _, _ := syscall.Syscall(procIsWellKnownSid.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(sidType), 0) + r0, _, _ := syscall.SyscallN(procIsWellKnownSid.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(sidType)) isWellKnown = r0 != 0 return } func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) + r1, _, e1 := syscall.SyscallN(procLookupAccountNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use))) if r1 == 0 { err = errnoErr(e1) } @@ -1022,7 +1022,7 @@ func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen } func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) + r1, _, e1 := syscall.SyscallN(procLookupAccountSidW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use))) if r1 == 0 { err = errnoErr(e1) } @@ -1030,7 +1030,7 @@ func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint3 } func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) { - r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + r1, _, e1 := syscall.SyscallN(procLookupPrivilegeValueW.Addr(), uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) if r1 == 0 { err = errnoErr(e1) } @@ -1038,7 +1038,7 @@ func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err err } func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall12(procMakeAbsoluteSD.Addr(), 11, 
uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize)), 0) + r1, _, e1 := syscall.SyscallN(procMakeAbsoluteSD.Addr(), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize))) if r1 == 0 { err = errnoErr(e1) } @@ -1046,7 +1046,7 @@ func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DE } func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procMakeSelfRelativeSD.Addr(), 3, uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize))) + r1, _, e1 := syscall.SyscallN(procMakeSelfRelativeSD.Addr(), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize))) if r1 == 0 { err = errnoErr(e1) } @@ -1054,7 +1054,7 @@ func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURIT } func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) { - r0, _, _ := syscall.Syscall(procNotifyServiceStatusChangeW.Addr(), 3, uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier))) + r0, _, _ := syscall.SyscallN(procNotifyServiceStatusChangeW.Addr(), uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1062,7 +1062,7 @@ func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERV } func OpenProcessToken(process Handle, access uint32, token *Token) (err error) { - r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token))) + r1, _, e1 := syscall.SyscallN(procOpenProcessToken.Addr(), uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token))) if r1 == 0 { err = errnoErr(e1) } @@ -1070,7 +1070,7 @@ func OpenProcessToken(process Handle, access uint32, token *Token) (err error) { } func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procOpenSCManagerW.Addr(), 3, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) + r0, _, e1 := syscall.SyscallN(procOpenSCManagerW.Addr(), uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1079,7 +1079,7 @@ func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (ha } func OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procOpenServiceW.Addr(), 3, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access)) + r0, _, e1 := syscall.SyscallN(procOpenServiceW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), 
uintptr(access)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1092,7 +1092,7 @@ func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token if openAsSelf { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + r1, _, e1 := syscall.SyscallN(procOpenThreadToken.Addr(), uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token))) if r1 == 0 { err = errnoErr(e1) } @@ -1100,7 +1100,7 @@ func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token } func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceConfig2W.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceConfig2W.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1108,7 +1108,7 @@ func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize } func QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceConfigW.Addr(), 4, uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceConfigW.Addr(), uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1120,7 +1120,7 @@ func QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInf if err != nil { return } - r1, _, e1 := syscall.Syscall(procQueryServiceDynamicInformation.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(dynamicInfo)) + r1, _, e1 := syscall.SyscallN(procQueryServiceDynamicInformation.Addr(), uintptr(service), uintptr(infoLevel), uintptr(dynamicInfo)) if r1 == 0 { err = errnoErr(e1) } @@ -1128,7 +1128,7 @@ func QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInf } func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceLockStatusW.Addr(), 4, uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceLockStatusW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1136,7 +1136,7 @@ func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, b } func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procQueryServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(status)), 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceStatus.Addr(), uintptr(service), uintptr(unsafe.Pointer(status))) if r1 == 0 { err = errnoErr(e1) } @@ -1144,7 +1144,7 @@ func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { } func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { - r1, _, 
e1 := syscall.Syscall6(procQueryServiceStatusEx.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceStatusEx.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1152,7 +1152,7 @@ func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize } func RegCloseKey(key Handle) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegCloseKey.Addr(), 1, uintptr(key), 0, 0) + r0, _, _ := syscall.SyscallN(procRegCloseKey.Addr(), uintptr(key)) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1160,7 +1160,7 @@ func RegCloseKey(key Handle) (regerrno error) { } func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegEnumKeyExW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)), 0) + r0, _, _ := syscall.SyscallN(procRegEnumKeyExW.Addr(), uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1176,7 +1176,7 @@ func RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, if asynchronous { _p1 = 1 } - r0, _, _ := syscall.Syscall6(procRegNotifyChangeKeyValue.Addr(), 5, uintptr(key), uintptr(_p0), uintptr(notifyFilter), uintptr(event), uintptr(_p1), 0) + r0, _, _ := syscall.SyscallN(procRegNotifyChangeKeyValue.Addr(), uintptr(key), uintptr(_p0), uintptr(notifyFilter), uintptr(event), uintptr(_p1)) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1184,7 +1184,7 @@ func RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, } func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegOpenKeyExW.Addr(), 5, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)), 0) + r0, _, _ := syscall.SyscallN(procRegOpenKeyExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1192,7 +1192,7 @@ func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint } func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) { - r0, _, _ := syscall.Syscall12(procRegQueryInfoKeyW.Addr(), 12, uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), 
uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) + r0, _, _ := syscall.SyscallN(procRegQueryInfoKeyW.Addr(), uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1200,7 +1200,7 @@ func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint } func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegQueryValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) + r0, _, _ := syscall.SyscallN(procRegQueryValueExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1208,7 +1208,7 @@ func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32 } func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procRegisterEventSourceW.Addr(), 2, uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)), 0) + r0, _, e1 := syscall.SyscallN(procRegisterEventSourceW.Addr(), uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1217,7 +1217,7 @@ func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Hand } func RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, context uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procRegisterServiceCtrlHandlerExW.Addr(), 3, uintptr(unsafe.Pointer(serviceName)), uintptr(handlerProc), uintptr(context)) + r0, _, e1 := syscall.SyscallN(procRegisterServiceCtrlHandlerExW.Addr(), uintptr(unsafe.Pointer(serviceName)), uintptr(handlerProc), uintptr(context)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1226,7 +1226,7 @@ func RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, cont } func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procReportEventW.Addr(), 9, uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) + r1, _, e1 := syscall.SyscallN(procReportEventW.Addr(), uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) if r1 == 0 { err = errnoErr(e1) } @@ -1234,7 +1234,7 @@ func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrS } func RevertToSelf() (err error) { - r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + r1, _, e1 := 
syscall.SyscallN(procRevertToSelf.Addr()) if r1 == 0 { err = errnoErr(e1) } @@ -1242,7 +1242,7 @@ func RevertToSelf() (err error) { } func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) { - r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL)), 0, 0) + r0, _, _ := syscall.SyscallN(procSetEntriesInAclW.Addr(), uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1250,7 +1250,7 @@ func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCE } func SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) { - r1, _, e1 := syscall.Syscall(procSetKernelObjectSecurity.Addr(), 3, uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor))) + r1, _, e1 := syscall.SyscallN(procSetKernelObjectSecurity.Addr(), uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor))) if r1 == 0 { err = errnoErr(e1) } @@ -1267,7 +1267,7 @@ func SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, security } func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { - r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfoW.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + r0, _, _ := syscall.SyscallN(procSetNamedSecurityInfoW.Addr(), uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1275,7 +1275,7 @@ func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securi } func setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) { - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet)) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet)) if r1 == 0 { err = errnoErr(e1) } @@ -1291,7 +1291,7 @@ func setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl * if daclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorDacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1)) if r1 == 0 { err = errnoErr(e1) } @@ -1303,7 +1303,7 @@ func setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaul if groupDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), 
uintptr(unsafe.Pointer(group)), uintptr(_p0)) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorGroup.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -1315,7 +1315,7 @@ func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaul if ownerDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0)) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorOwner.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -1323,7 +1323,7 @@ func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaul } func setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) { - syscall.Syscall(procSetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) + syscall.SyscallN(procSetSecurityDescriptorRMControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl))) return } @@ -1336,7 +1336,7 @@ func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl * if saclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorSacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1)) if r1 == 0 { err = errnoErr(e1) } @@ -1344,7 +1344,7 @@ func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl * } func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { - r0, _, _ := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + r0, _, _ := syscall.SyscallN(procSetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1352,7 +1352,7 @@ func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformati } func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procSetServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(serviceStatus)), 0) + r1, _, e1 := syscall.SyscallN(procSetServiceStatus.Addr(), uintptr(service), uintptr(unsafe.Pointer(serviceStatus))) if r1 == 0 { err = errnoErr(e1) } @@ -1360,7 +1360,7 @@ func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) } func SetThreadToken(thread *Handle, token Token) (err error) { - r1, _, e1 := syscall.Syscall(procSetThreadToken.Addr(), 2, uintptr(unsafe.Pointer(thread)), uintptr(token), 0) + r1, _, e1 := syscall.SyscallN(procSetThreadToken.Addr(), uintptr(unsafe.Pointer(thread)), uintptr(token)) if r1 == 0 { err = errnoErr(e1) } @@ -1368,7 +1368,7 @@ func SetThreadToken(thread *Handle, token Token) (err error) { } func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) { - r1, _, e1 := 
syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetTokenInformation.Addr(), uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen)) if r1 == 0 { err = errnoErr(e1) } @@ -1376,7 +1376,7 @@ func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint } func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) { - r1, _, e1 := syscall.Syscall(procStartServiceCtrlDispatcherW.Addr(), 1, uintptr(unsafe.Pointer(serviceTable)), 0, 0) + r1, _, e1 := syscall.SyscallN(procStartServiceCtrlDispatcherW.Addr(), uintptr(unsafe.Pointer(serviceTable))) if r1 == 0 { err = errnoErr(e1) } @@ -1384,7 +1384,7 @@ func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) { } func StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procStartServiceW.Addr(), 3, uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors))) + r1, _, e1 := syscall.SyscallN(procStartServiceW.Addr(), uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors))) if r1 == 0 { err = errnoErr(e1) } @@ -1392,7 +1392,7 @@ func StartService(service Handle, numArgs uint32, argVectors **uint16) (err erro } func CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) { - r1, _, e1 := syscall.Syscall6(procCertAddCertificateContextToStore.Addr(), 4, uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertAddCertificateContextToStore.Addr(), uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext))) if r1 == 0 { err = errnoErr(e1) } @@ -1400,7 +1400,7 @@ func CertAddCertificateContextToStore(store Handle, certContext *CertContext, ad } func CertCloseStore(store Handle, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCertCloseStore.Addr(), 2, uintptr(store), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCertCloseStore.Addr(), uintptr(store), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -1408,7 +1408,7 @@ func CertCloseStore(store Handle, flags uint32) (err error) { } func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) { - r0, _, e1 := syscall.Syscall(procCertCreateCertificateContext.Addr(), 3, uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) + r0, _, e1 := syscall.SyscallN(procCertCreateCertificateContext.Addr(), uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) context = (*CertContext)(unsafe.Pointer(r0)) if context == nil { err = errnoErr(e1) @@ -1417,7 +1417,7 @@ func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, en } func CertDeleteCertificateFromStore(certContext *CertContext) (err error) { - r1, _, e1 := syscall.Syscall(procCertDeleteCertificateFromStore.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertDeleteCertificateFromStore.Addr(), uintptr(unsafe.Pointer(certContext))) if r1 == 0 { err = errnoErr(e1) } @@ -1425,13 +1425,13 @@ func CertDeleteCertificateFromStore(certContext *CertContext) (err error) { } func 
CertDuplicateCertificateContext(certContext *CertContext) (dupContext *CertContext) { - r0, _, _ := syscall.Syscall(procCertDuplicateCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0) + r0, _, _ := syscall.SyscallN(procCertDuplicateCertificateContext.Addr(), uintptr(unsafe.Pointer(certContext))) dupContext = (*CertContext)(unsafe.Pointer(r0)) return } func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) { - r0, _, e1 := syscall.Syscall(procCertEnumCertificatesInStore.Addr(), 2, uintptr(store), uintptr(unsafe.Pointer(prevContext)), 0) + r0, _, e1 := syscall.SyscallN(procCertEnumCertificatesInStore.Addr(), uintptr(store), uintptr(unsafe.Pointer(prevContext))) context = (*CertContext)(unsafe.Pointer(r0)) if context == nil { err = errnoErr(e1) @@ -1440,7 +1440,7 @@ func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (contex } func CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevCertContext *CertContext) (cert *CertContext, err error) { - r0, _, e1 := syscall.Syscall6(procCertFindCertificateInStore.Addr(), 6, uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevCertContext))) + r0, _, e1 := syscall.SyscallN(procCertFindCertificateInStore.Addr(), uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevCertContext))) cert = (*CertContext)(unsafe.Pointer(r0)) if cert == nil { err = errnoErr(e1) @@ -1449,7 +1449,7 @@ func CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags } func CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevChainContext *CertChainContext) (certchain *CertChainContext, err error) { - r0, _, e1 := syscall.Syscall6(procCertFindChainInStore.Addr(), 6, uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevChainContext))) + r0, _, e1 := syscall.SyscallN(procCertFindChainInStore.Addr(), uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevChainContext))) certchain = (*CertChainContext)(unsafe.Pointer(r0)) if certchain == nil { err = errnoErr(e1) @@ -1458,18 +1458,18 @@ func CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint3 } func CertFindExtension(objId *byte, countExtensions uint32, extensions *CertExtension) (ret *CertExtension) { - r0, _, _ := syscall.Syscall(procCertFindExtension.Addr(), 3, uintptr(unsafe.Pointer(objId)), uintptr(countExtensions), uintptr(unsafe.Pointer(extensions))) + r0, _, _ := syscall.SyscallN(procCertFindExtension.Addr(), uintptr(unsafe.Pointer(objId)), uintptr(countExtensions), uintptr(unsafe.Pointer(extensions))) ret = (*CertExtension)(unsafe.Pointer(r0)) return } func CertFreeCertificateChain(ctx *CertChainContext) { - syscall.Syscall(procCertFreeCertificateChain.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) + syscall.SyscallN(procCertFreeCertificateChain.Addr(), uintptr(unsafe.Pointer(ctx))) return } func CertFreeCertificateContext(ctx *CertContext) (err error) { - r1, _, e1 := syscall.Syscall(procCertFreeCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertFreeCertificateContext.Addr(), 
uintptr(unsafe.Pointer(ctx))) if r1 == 0 { err = errnoErr(e1) } @@ -1477,7 +1477,7 @@ func CertFreeCertificateContext(ctx *CertContext) (err error) { } func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) { - r1, _, e1 := syscall.Syscall9(procCertGetCertificateChain.Addr(), 8, uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)), 0) + r1, _, e1 := syscall.SyscallN(procCertGetCertificateChain.Addr(), uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx))) if r1 == 0 { err = errnoErr(e1) } @@ -1485,13 +1485,13 @@ func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, a } func CertGetNameString(certContext *CertContext, nameType uint32, flags uint32, typePara unsafe.Pointer, name *uint16, size uint32) (chars uint32) { - r0, _, _ := syscall.Syscall6(procCertGetNameStringW.Addr(), 6, uintptr(unsafe.Pointer(certContext)), uintptr(nameType), uintptr(flags), uintptr(typePara), uintptr(unsafe.Pointer(name)), uintptr(size)) + r0, _, _ := syscall.SyscallN(procCertGetNameStringW.Addr(), uintptr(unsafe.Pointer(certContext)), uintptr(nameType), uintptr(flags), uintptr(typePara), uintptr(unsafe.Pointer(name)), uintptr(size)) chars = uint32(r0) return } func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCertOpenStore.Addr(), 5, uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para), 0) + r0, _, e1 := syscall.SyscallN(procCertOpenStore.Addr(), uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1500,7 +1500,7 @@ func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptPr } func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { - r0, _, e1 := syscall.Syscall(procCertOpenSystemStoreW.Addr(), 2, uintptr(hprov), uintptr(unsafe.Pointer(name)), 0) + r0, _, e1 := syscall.SyscallN(procCertOpenSystemStoreW.Addr(), uintptr(hprov), uintptr(unsafe.Pointer(name))) store = Handle(r0) if store == 0 { err = errnoErr(e1) @@ -1509,7 +1509,7 @@ func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { } func CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) { - r1, _, e1 := syscall.Syscall6(procCertVerifyCertificateChainPolicy.Addr(), 4, uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertVerifyCertificateChainPolicy.Addr(), uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status))) if r1 == 0 { err = errnoErr(e1) } @@ -1521,7 +1521,7 @@ func CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, paramete if *callerFreeProvOrNCryptKey { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procCryptAcquireCertificatePrivateKey.Addr(), 6, 
uintptr(unsafe.Pointer(cert)), uintptr(flags), uintptr(parameters), uintptr(unsafe.Pointer(cryptProvOrNCryptKey)), uintptr(unsafe.Pointer(keySpec)), uintptr(unsafe.Pointer(&_p0))) + r1, _, e1 := syscall.SyscallN(procCryptAcquireCertificatePrivateKey.Addr(), uintptr(unsafe.Pointer(cert)), uintptr(flags), uintptr(parameters), uintptr(unsafe.Pointer(cryptProvOrNCryptKey)), uintptr(unsafe.Pointer(keySpec)), uintptr(unsafe.Pointer(&_p0))) *callerFreeProvOrNCryptKey = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -1530,7 +1530,7 @@ func CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, paramete } func CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte, lenEncodedBytes uint32, flags uint32, decoded unsafe.Pointer, decodedLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptDecodeObject.Addr(), 7, uintptr(encodingType), uintptr(unsafe.Pointer(structType)), uintptr(unsafe.Pointer(encodedBytes)), uintptr(lenEncodedBytes), uintptr(flags), uintptr(decoded), uintptr(unsafe.Pointer(decodedLen)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCryptDecodeObject.Addr(), uintptr(encodingType), uintptr(unsafe.Pointer(structType)), uintptr(unsafe.Pointer(encodedBytes)), uintptr(lenEncodedBytes), uintptr(flags), uintptr(decoded), uintptr(unsafe.Pointer(decodedLen))) if r1 == 0 { err = errnoErr(e1) } @@ -1538,7 +1538,7 @@ func CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte } func CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptProtectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCryptProtectData.Addr(), uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut))) if r1 == 0 { err = errnoErr(e1) } @@ -1546,7 +1546,7 @@ func CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, } func CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentTypeFlags uint32, expectedFormatTypeFlags uint32, flags uint32, msgAndCertEncodingType *uint32, contentType *uint32, formatType *uint32, certStore *Handle, msg *Handle, context *unsafe.Pointer) (err error) { - r1, _, e1 := syscall.Syscall12(procCryptQueryObject.Addr(), 11, uintptr(objectType), uintptr(object), uintptr(expectedContentTypeFlags), uintptr(expectedFormatTypeFlags), uintptr(flags), uintptr(unsafe.Pointer(msgAndCertEncodingType)), uintptr(unsafe.Pointer(contentType)), uintptr(unsafe.Pointer(formatType)), uintptr(unsafe.Pointer(certStore)), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(context)), 0) + r1, _, e1 := syscall.SyscallN(procCryptQueryObject.Addr(), uintptr(objectType), uintptr(object), uintptr(expectedContentTypeFlags), uintptr(expectedFormatTypeFlags), uintptr(flags), uintptr(unsafe.Pointer(msgAndCertEncodingType)), uintptr(unsafe.Pointer(contentType)), uintptr(unsafe.Pointer(formatType)), uintptr(unsafe.Pointer(certStore)), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(context))) if r1 == 0 { err = errnoErr(e1) } @@ -1554,7 +1554,7 @@ func CryptQueryObject(objectType uint32, object 
unsafe.Pointer, expectedContentT } func CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptUnprotectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCryptUnprotectData.Addr(), uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut))) if r1 == 0 { err = errnoErr(e1) } @@ -1562,7 +1562,7 @@ func CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBl } func PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (store Handle, err error) { - r0, _, e1 := syscall.Syscall(procPFXImportCertStore.Addr(), 3, uintptr(unsafe.Pointer(pfx)), uintptr(unsafe.Pointer(password)), uintptr(flags)) + r0, _, e1 := syscall.SyscallN(procPFXImportCertStore.Addr(), uintptr(unsafe.Pointer(pfx)), uintptr(unsafe.Pointer(password)), uintptr(flags)) store = Handle(r0) if store == 0 { err = errnoErr(e1) @@ -1571,7 +1571,7 @@ func PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (sto } func DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) { - r0, _, _ := syscall.Syscall(procDnsNameCompare_W.Addr(), 2, uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)), 0) + r0, _, _ := syscall.SyscallN(procDnsNameCompare_W.Addr(), uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2))) same = r0 != 0 return } @@ -1586,7 +1586,7 @@ func DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSR } func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { - r0, _, _ := syscall.Syscall6(procDnsQuery_W.Addr(), 6, uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) + r0, _, _ := syscall.SyscallN(procDnsQuery_W.Addr(), uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) if r0 != 0 { status = syscall.Errno(r0) } @@ -1594,12 +1594,12 @@ func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DN } func DnsRecordListFree(rl *DNSRecord, freetype uint32) { - syscall.Syscall(procDnsRecordListFree.Addr(), 2, uintptr(unsafe.Pointer(rl)), uintptr(freetype), 0) + syscall.SyscallN(procDnsRecordListFree.Addr(), uintptr(unsafe.Pointer(rl)), uintptr(freetype)) return } func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) { - r0, _, _ := syscall.Syscall6(procDwmGetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0) + r0, _, _ := syscall.SyscallN(procDwmGetWindowAttribute.Addr(), uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size)) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1607,7 +1607,7 @@ func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si } func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) { - r0, _, _ := 
syscall.Syscall6(procDwmSetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0) + r0, _, _ := syscall.SyscallN(procDwmSetWindowAttribute.Addr(), uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size)) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1615,7 +1615,7 @@ func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si } func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { - r0, _, _ := syscall.Syscall(procCancelMibChangeNotify2.Addr(), 1, uintptr(notificationHandle), 0, 0) + r0, _, _ := syscall.SyscallN(procCancelMibChangeNotify2.Addr(), uintptr(notificationHandle)) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1623,7 +1623,7 @@ func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { } func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { - r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) + r0, _, _ := syscall.SyscallN(procGetAdaptersAddresses.Addr(), uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1631,7 +1631,7 @@ func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapter } func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { - r0, _, _ := syscall.Syscall(procGetAdaptersInfo.Addr(), 2, uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)), 0) + r0, _, _ := syscall.SyscallN(procGetAdaptersInfo.Addr(), uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1639,7 +1639,7 @@ func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { } func getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) { - r0, _, _ := syscall.Syscall(procGetBestInterfaceEx.Addr(), 2, uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex)), 0) + r0, _, _ := syscall.SyscallN(procGetBestInterfaceEx.Addr(), uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1647,7 +1647,7 @@ func getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcod } func GetIfEntry(pIfRow *MibIfRow) (errcode error) { - r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetIfEntry.Addr(), uintptr(unsafe.Pointer(pIfRow))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1655,7 +1655,7 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { } func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { - r0, _, _ := syscall.Syscall(procGetIfEntry2Ex.Addr(), 2, uintptr(level), uintptr(unsafe.Pointer(row)), 0) + r0, _, _ := syscall.SyscallN(procGetIfEntry2Ex.Addr(), uintptr(level), uintptr(unsafe.Pointer(row))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1663,7 +1663,7 @@ func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { } func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) { - r0, _, _ := syscall.Syscall(procGetUnicastIpAddressEntry.Addr(), 1, uintptr(unsafe.Pointer(row)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1675,7 +1675,7 @@ 
func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsa if initialNotification { _p0 = 1 } - r0, _, _ := syscall.Syscall6(procNotifyIpInterfaceChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0) + r0, _, _ := syscall.SyscallN(procNotifyIpInterfaceChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1687,7 +1687,7 @@ func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext if initialNotification { _p0 = 1 } - r0, _, _ := syscall.Syscall6(procNotifyUnicastIpAddressChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0) + r0, _, _ := syscall.SyscallN(procNotifyUnicastIpAddressChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1695,7 +1695,7 @@ func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext } func AddDllDirectory(path *uint16) (cookie uintptr, err error) { - r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r0, _, e1 := syscall.SyscallN(procAddDllDirectory.Addr(), uintptr(unsafe.Pointer(path))) cookie = uintptr(r0) if cookie == 0 { err = errnoErr(e1) @@ -1704,7 +1704,7 @@ func AddDllDirectory(path *uint16) (cookie uintptr, err error) { } func AssignProcessToJobObject(job Handle, process Handle) (err error) { - r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0) + r1, _, e1 := syscall.SyscallN(procAssignProcessToJobObject.Addr(), uintptr(job), uintptr(process)) if r1 == 0 { err = errnoErr(e1) } @@ -1712,7 +1712,7 @@ func AssignProcessToJobObject(job Handle, process Handle) (err error) { } func CancelIo(s Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIo.Addr(), 1, uintptr(s), 0, 0) + r1, _, e1 := syscall.SyscallN(procCancelIo.Addr(), uintptr(s)) if r1 == 0 { err = errnoErr(e1) } @@ -1720,7 +1720,7 @@ func CancelIo(s Handle) (err error) { } func CancelIoEx(s Handle, o *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(s), uintptr(unsafe.Pointer(o)), 0) + r1, _, e1 := syscall.SyscallN(procCancelIoEx.Addr(), uintptr(s), uintptr(unsafe.Pointer(o))) if r1 == 0 { err = errnoErr(e1) } @@ -1728,7 +1728,7 @@ func CancelIoEx(s Handle, o *Overlapped) (err error) { } func ClearCommBreak(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procClearCommBreak.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procClearCommBreak.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -1736,7 +1736,7 @@ func ClearCommBreak(handle Handle) (err error) { } func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) { - r1, _, e1 := syscall.Syscall(procClearCommError.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat))) + r1, _, e1 := syscall.SyscallN(procClearCommError.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat))) if r1 == 0 { err = errnoErr(e1) } @@ -1744,7 +1744,7 @@ func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error } func CloseHandle(handle Handle) (err error) { - r1, _, e1 := 
syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procCloseHandle.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -1752,12 +1752,12 @@ func CloseHandle(handle Handle) (err error) { } func ClosePseudoConsole(console Handle) { - syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(console), 0, 0) + syscall.SyscallN(procClosePseudoConsole.Addr(), uintptr(console)) return } func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procConnectNamedPipe.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -1765,7 +1765,7 @@ func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) { } func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) { - r1, _, e1 := syscall.Syscall(procCreateDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)), 0) + r1, _, e1 := syscall.SyscallN(procCreateDirectoryW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa))) if r1 == 0 { err = errnoErr(e1) } @@ -1773,7 +1773,7 @@ func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) { } func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateEventExW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateEventExW.Addr(), uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess)) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1782,7 +1782,7 @@ func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, d } func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateEventW.Addr(), uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1791,7 +1791,7 @@ func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialStat } func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateFileMappingW.Addr(), 6, uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name))) + r0, _, e1 := syscall.SyscallN(procCreateFileMappingW.Addr(), uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1800,7 +1800,7 @@ func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxS } func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) 
(handle Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -1809,7 +1809,7 @@ func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes } func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procCreateHardLinkW.Addr(), 3, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved)) + r1, _, e1 := syscall.SyscallN(procCreateHardLinkW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved)) if r1&0xff == 0 { err = errnoErr(e1) } @@ -1817,7 +1817,7 @@ func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr } func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, threadcnt uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateIoCompletionPort.Addr(), uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1826,7 +1826,7 @@ func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, thr } func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procCreateJobObjectW.Addr(), 2, uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)), 0) + r0, _, e1 := syscall.SyscallN(procCreateJobObjectW.Addr(), uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1835,7 +1835,7 @@ func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, } func CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateMutexExW.Addr(), 4, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateMutexExW.Addr(), uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess)) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1848,7 +1848,7 @@ func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16 if initialOwner { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procCreateMutexW.Addr(), 3, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name))) + r0, _, e1 := syscall.SyscallN(procCreateMutexW.Addr(), uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1857,7 +1857,7 @@ func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16 } func CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, 
defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) + r0, _, e1 := syscall.SyscallN(procCreateNamedPipeW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa))) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -1866,7 +1866,7 @@ func CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances u } func CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCreatePipe.Addr(), 4, uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size), 0, 0) + r1, _, e1 := syscall.SyscallN(procCreatePipe.Addr(), uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size)) if r1 == 0 { err = errnoErr(e1) } @@ -1878,7 +1878,7 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA if inheritHandles { _p0 = 1 } - r1, _, e1 := syscall.Syscall12(procCreateProcessW.Addr(), 10, uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCreateProcessW.Addr(), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo))) if r1 == 0 { err = errnoErr(e1) } @@ -1886,7 +1886,7 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA } func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pconsole *Handle) (hr error) { - r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole)), 0) + r0, _, _ := syscall.SyscallN(procCreatePseudoConsole.Addr(), uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole))) if r0 != 0 { hr = syscall.Errno(r0) } @@ -1894,7 +1894,7 @@ func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pcons } func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procCreateSymbolicLinkW.Addr(), uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) if r1&0xff == 0 { err = errnoErr(e1) } @@ -1902,7 +1902,7 @@ func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags u } func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) { - r0, _, e1 := 
syscall.Syscall(procCreateToolhelp32Snapshot.Addr(), 2, uintptr(flags), uintptr(processId), 0) + r0, _, e1 := syscall.SyscallN(procCreateToolhelp32Snapshot.Addr(), uintptr(flags), uintptr(processId)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -1911,7 +1911,7 @@ func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, er } func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) + r1, _, e1 := syscall.SyscallN(procDefineDosDeviceW.Addr(), uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) if r1 == 0 { err = errnoErr(e1) } @@ -1919,7 +1919,7 @@ func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err } func DeleteFile(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteFileW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeleteFileW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -1927,12 +1927,12 @@ func DeleteFile(path *uint16) (err error) { } func deleteProcThreadAttributeList(attrlist *ProcThreadAttributeList) { - syscall.Syscall(procDeleteProcThreadAttributeList.Addr(), 1, uintptr(unsafe.Pointer(attrlist)), 0, 0) + syscall.SyscallN(procDeleteProcThreadAttributeList.Addr(), uintptr(unsafe.Pointer(attrlist))) return } func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteVolumeMountPointW.Addr(), 1, uintptr(unsafe.Pointer(volumeMountPoint)), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeleteVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint))) if r1 == 0 { err = errnoErr(e1) } @@ -1940,7 +1940,7 @@ func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) { } func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procDeviceIoControl.Addr(), uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -1948,7 +1948,7 @@ func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBuff } func DisconnectNamedPipe(pipe Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(pipe), 0, 0) + r1, _, e1 := syscall.SyscallN(procDisconnectNamedPipe.Addr(), uintptr(pipe)) if r1 == 0 { err = errnoErr(e1) } @@ -1960,7 +1960,7 @@ func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP if bInheritHandle { _p0 = 1 } - r1, _, e1 := syscall.Syscall9(procDuplicateHandle.Addr(), 7, uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions), 0, 0) + r1, _, e1 := 
syscall.SyscallN(procDuplicateHandle.Addr(), uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions)) if r1 == 0 { err = errnoErr(e1) } @@ -1968,7 +1968,7 @@ func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP } func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) { - r1, _, e1 := syscall.Syscall(procEscapeCommFunction.Addr(), 2, uintptr(handle), uintptr(dwFunc), 0) + r1, _, e1 := syscall.SyscallN(procEscapeCommFunction.Addr(), uintptr(handle), uintptr(dwFunc)) if r1 == 0 { err = errnoErr(e1) } @@ -1976,12 +1976,12 @@ func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) { } func ExitProcess(exitcode uint32) { - syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) + syscall.SyscallN(procExitProcess.Addr(), uintptr(exitcode)) return } func ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + r0, _, e1 := syscall.SyscallN(procExpandEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -1990,7 +1990,7 @@ func ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, } func FindClose(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindClose.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindClose.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -1998,7 +1998,7 @@ func FindClose(handle Handle) (err error) { } func FindCloseChangeNotification(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindCloseChangeNotification.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindCloseChangeNotification.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -2019,7 +2019,7 @@ func _FindFirstChangeNotification(path *uint16, watchSubtree bool, notifyFilter if watchSubtree { _p1 = 1 } - r0, _, e1 := syscall.Syscall(procFindFirstChangeNotificationW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(_p1), uintptr(notifyFilter)) + r0, _, e1 := syscall.SyscallN(procFindFirstChangeNotificationW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(_p1), uintptr(notifyFilter)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -2028,7 +2028,7 @@ func _FindFirstChangeNotification(path *uint16, watchSubtree bool, notifyFilter } func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstFileW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)), 0) + r0, _, e1 := syscall.SyscallN(procFindFirstFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data))) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -2037,7 +2037,7 @@ func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err erro } func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + r0, _, e1 := syscall.SyscallN(procFindFirstVolumeMountPointW.Addr(), 
uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -2046,7 +2046,7 @@ func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, b } func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstVolumeW.Addr(), 2, uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength), 0) + r0, _, e1 := syscall.SyscallN(procFindFirstVolumeW.Addr(), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -2055,7 +2055,7 @@ func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, er } func FindNextChangeNotification(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextChangeNotification.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindNextChangeNotification.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -2063,7 +2063,7 @@ func FindNextChangeNotification(handle Handle) (err error) { } func findNextFile1(handle Handle, data *win32finddata1) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextFileW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) + r1, _, e1 := syscall.SyscallN(procFindNextFileW.Addr(), uintptr(handle), uintptr(unsafe.Pointer(data))) if r1 == 0 { err = errnoErr(e1) } @@ -2071,7 +2071,7 @@ func findNextFile1(handle Handle, data *win32finddata1) (err error) { } func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextVolumeMountPointW.Addr(), 3, uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + r1, _, e1 := syscall.SyscallN(procFindNextVolumeMountPointW.Addr(), uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) if r1 == 0 { err = errnoErr(e1) } @@ -2079,7 +2079,7 @@ func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uin } func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextVolumeW.Addr(), 3, uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) + r1, _, e1 := syscall.SyscallN(procFindNextVolumeW.Addr(), uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) if r1 == 0 { err = errnoErr(e1) } @@ -2087,7 +2087,7 @@ func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) } func findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindResourceW.Addr(), 3, uintptr(module), uintptr(name), uintptr(resType)) + r0, _, e1 := syscall.SyscallN(procFindResourceW.Addr(), uintptr(module), uintptr(name), uintptr(resType)) resInfo = Handle(r0) if resInfo == 0 { err = errnoErr(e1) @@ -2096,7 +2096,7 @@ func findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, } func FindVolumeClose(findVolume Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindVolumeClose.Addr(), 1, uintptr(findVolume), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindVolumeClose.Addr(), uintptr(findVolume)) if r1 == 0 { err = errnoErr(e1) } @@ -2104,7 +2104,7 @@ func FindVolumeClose(findVolume Handle) (err error) { } func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err 
error) { - r1, _, e1 := syscall.Syscall(procFindVolumeMountPointClose.Addr(), 1, uintptr(findVolumeMountPoint), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindVolumeMountPointClose.Addr(), uintptr(findVolumeMountPoint)) if r1 == 0 { err = errnoErr(e1) } @@ -2112,7 +2112,7 @@ func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { } func FlushFileBuffers(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFlushFileBuffers.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFlushFileBuffers.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -2120,7 +2120,7 @@ func FlushFileBuffers(handle Handle) (err error) { } func FlushViewOfFile(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procFlushViewOfFile.Addr(), 2, uintptr(addr), uintptr(length), 0) + r1, _, e1 := syscall.SyscallN(procFlushViewOfFile.Addr(), uintptr(addr), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -2132,7 +2132,7 @@ func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, bu if len(buf) > 0 { _p0 = &buf[0] } - r0, _, e1 := syscall.Syscall9(procFormatMessageW.Addr(), 7, uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)), 0, 0) + r0, _, e1 := syscall.SyscallN(procFormatMessageW.Addr(), uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2141,7 +2141,7 @@ func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, bu } func FreeEnvironmentStrings(envs *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procFreeEnvironmentStringsW.Addr(), 1, uintptr(unsafe.Pointer(envs)), 0, 0) + r1, _, e1 := syscall.SyscallN(procFreeEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(envs))) if r1 == 0 { err = errnoErr(e1) } @@ -2149,7 +2149,7 @@ func FreeEnvironmentStrings(envs *uint16) (err error) { } func FreeLibrary(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFreeLibrary.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFreeLibrary.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -2157,7 +2157,7 @@ func FreeLibrary(handle Handle) (err error) { } func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGenerateConsoleCtrlEvent.Addr(), 2, uintptr(ctrlEvent), uintptr(processGroupID), 0) + r1, _, e1 := syscall.SyscallN(procGenerateConsoleCtrlEvent.Addr(), uintptr(ctrlEvent), uintptr(processGroupID)) if r1 == 0 { err = errnoErr(e1) } @@ -2165,19 +2165,19 @@ func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err erro } func GetACP() (acp uint32) { - r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetACP.Addr()) acp = uint32(r0) return } func GetActiveProcessorCount(groupNumber uint16) (ret uint32) { - r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) + r0, _, _ := syscall.SyscallN(procGetActiveProcessorCount.Addr(), uintptr(groupNumber)) ret = uint32(r0) return } func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetCommModemStatus.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpModemStat)), 0) + r1, _, e1 := syscall.SyscallN(procGetCommModemStatus.Addr(), uintptr(handle), 
uintptr(unsafe.Pointer(lpModemStat))) if r1 == 0 { err = errnoErr(e1) } @@ -2185,7 +2185,7 @@ func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) { } func GetCommState(handle Handle, lpDCB *DCB) (err error) { - r1, _, e1 := syscall.Syscall(procGetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + r1, _, e1 := syscall.SyscallN(procGetCommState.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpDCB))) if r1 == 0 { err = errnoErr(e1) } @@ -2193,7 +2193,7 @@ func GetCommState(handle Handle, lpDCB *DCB) (err error) { } func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { - r1, _, e1 := syscall.Syscall(procGetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) + r1, _, e1 := syscall.SyscallN(procGetCommTimeouts.Addr(), uintptr(handle), uintptr(unsafe.Pointer(timeouts))) if r1 == 0 { err = errnoErr(e1) } @@ -2201,13 +2201,13 @@ func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { } func GetCommandLine() (cmd *uint16) { - r0, _, _ := syscall.Syscall(procGetCommandLineW.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetCommandLineW.Addr()) cmd = (*uint16)(unsafe.Pointer(r0)) return } func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) + r1, _, e1 := syscall.SyscallN(procGetComputerNameExW.Addr(), uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) if r1 == 0 { err = errnoErr(e1) } @@ -2215,7 +2215,7 @@ func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) { } func GetComputerName(buf *uint16, n *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetComputerNameW.Addr(), 2, uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)), 0) + r1, _, e1 := syscall.SyscallN(procGetComputerNameW.Addr(), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) if r1 == 0 { err = errnoErr(e1) } @@ -2223,7 +2223,7 @@ func GetComputerName(buf *uint16, n *uint32) (err error) { } func GetConsoleCP() (cp uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetConsoleCP.Addr()) cp = uint32(r0) if cp == 0 { err = errnoErr(e1) @@ -2232,7 +2232,7 @@ func GetConsoleCP() (cp uint32, err error) { } func GetConsoleMode(console Handle, mode *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) + r1, _, e1 := syscall.SyscallN(procGetConsoleMode.Addr(), uintptr(console), uintptr(unsafe.Pointer(mode))) if r1 == 0 { err = errnoErr(e1) } @@ -2240,7 +2240,7 @@ func GetConsoleMode(console Handle, mode *uint32) (err error) { } func GetConsoleOutputCP() (cp uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetConsoleOutputCP.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetConsoleOutputCP.Addr()) cp = uint32(r0) if cp == 0 { err = errnoErr(e1) @@ -2249,7 +2249,7 @@ func GetConsoleOutputCP() (cp uint32, err error) { } func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { - r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) + r1, _, e1 := syscall.SyscallN(procGetConsoleScreenBufferInfo.Addr(), uintptr(console), uintptr(unsafe.Pointer(info))) if r1 == 0 { err = errnoErr(e1) } @@ -2257,7 +2257,7 @@ func 
GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) ( } func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetCurrentDirectoryW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + r0, _, e1 := syscall.SyscallN(procGetCurrentDirectoryW.Addr(), uintptr(buflen), uintptr(unsafe.Pointer(buf))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2266,19 +2266,19 @@ func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) { } func GetCurrentProcessId() (pid uint32) { - r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetCurrentProcessId.Addr()) pid = uint32(r0) return } func GetCurrentThreadId() (id uint32) { - r0, _, _ := syscall.Syscall(procGetCurrentThreadId.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetCurrentThreadId.Addr()) id = uint32(r0) return } func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) { - r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetDiskFreeSpaceExW.Addr(), uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes))) if r1 == 0 { err = errnoErr(e1) } @@ -2286,13 +2286,13 @@ func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint6 } func GetDriveType(rootPathName *uint16) (driveType uint32) { - r0, _, _ := syscall.Syscall(procGetDriveTypeW.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetDriveTypeW.Addr(), uintptr(unsafe.Pointer(rootPathName))) driveType = uint32(r0) return } func GetEnvironmentStrings() (envs *uint16, err error) { - r0, _, e1 := syscall.Syscall(procGetEnvironmentStringsW.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetEnvironmentStringsW.Addr()) envs = (*uint16)(unsafe.Pointer(r0)) if envs == nil { err = errnoErr(e1) @@ -2301,7 +2301,7 @@ func GetEnvironmentStrings() (envs *uint16, err error) { } func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetEnvironmentVariableW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size)) + r0, _, e1 := syscall.SyscallN(procGetEnvironmentVariableW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2310,7 +2310,7 @@ func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32 } func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetExitCodeProcess.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(exitcode)), 0) + r1, _, e1 := syscall.SyscallN(procGetExitCodeProcess.Addr(), uintptr(handle), uintptr(unsafe.Pointer(exitcode))) if r1 == 0 { err = errnoErr(e1) } @@ -2318,7 +2318,7 @@ func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) { } func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) { - r1, _, e1 := syscall.Syscall(procGetFileAttributesExW.Addr(), 3, uintptr(unsafe.Pointer(name)), 
uintptr(level), uintptr(unsafe.Pointer(info))) + r1, _, e1 := syscall.SyscallN(procGetFileAttributesExW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info))) if r1 == 0 { err = errnoErr(e1) } @@ -2326,7 +2326,7 @@ func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) { } func GetFileAttributes(name *uint16) (attrs uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileAttributesW.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetFileAttributesW.Addr(), uintptr(unsafe.Pointer(name))) attrs = uint32(r0) if attrs == INVALID_FILE_ATTRIBUTES { err = errnoErr(e1) @@ -2335,7 +2335,7 @@ func GetFileAttributes(name *uint16) (attrs uint32, err error) { } func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) { - r1, _, e1 := syscall.Syscall(procGetFileInformationByHandle.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) + r1, _, e1 := syscall.SyscallN(procGetFileInformationByHandle.Addr(), uintptr(handle), uintptr(unsafe.Pointer(data))) if r1 == 0 { err = errnoErr(e1) } @@ -2343,7 +2343,7 @@ func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (e } func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetFileInformationByHandleEx.Addr(), uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen)) if r1 == 0 { err = errnoErr(e1) } @@ -2351,7 +2351,7 @@ func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, } func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetFileTime.Addr(), uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime))) if r1 == 0 { err = errnoErr(e1) } @@ -2359,7 +2359,7 @@ func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim } func GetFileType(filehandle Handle) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetFileType.Addr(), uintptr(filehandle)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2368,7 +2368,7 @@ func GetFileType(filehandle Handle) (n uint32, err error) { } func GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall6(procGetFinalPathNameByHandleW.Addr(), 4, uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetFinalPathNameByHandleW.Addr(), uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2377,7 +2377,7 @@ func GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32 } func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall6(procGetFullPathNameW.Addr(), 4, uintptr(unsafe.Pointer(path)), 
uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetFullPathNameW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2386,13 +2386,13 @@ func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) ( } func GetLargePageMinimum() (size uintptr) { - r0, _, _ := syscall.Syscall(procGetLargePageMinimum.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetLargePageMinimum.Addr()) size = uintptr(r0) return } func GetLastError() (lasterr error) { - r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetLastError.Addr()) if r0 != 0 { lasterr = syscall.Errno(r0) } @@ -2400,7 +2400,7 @@ func GetLastError() (lasterr error) { } func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLogicalDriveStringsW.Addr(), 2, uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)), 0) + r0, _, e1 := syscall.SyscallN(procGetLogicalDriveStringsW.Addr(), uintptr(bufferLength), uintptr(unsafe.Pointer(buffer))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2409,7 +2409,7 @@ func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err } func GetLogicalDrives() (drivesBitMask uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLogicalDrives.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetLogicalDrives.Addr()) drivesBitMask = uint32(r0) if drivesBitMask == 0 { err = errnoErr(e1) @@ -2418,7 +2418,7 @@ func GetLogicalDrives() (drivesBitMask uint32, err error) { } func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLongPathNameW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen)) + r0, _, e1 := syscall.SyscallN(procGetLongPathNameW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2427,13 +2427,13 @@ func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err er } func GetMaximumProcessorCount(groupNumber uint16) (ret uint32) { - r0, _, _ := syscall.Syscall(procGetMaximumProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) + r0, _, _ := syscall.SyscallN(procGetMaximumProcessorCount.Addr(), uintptr(groupNumber)) ret = uint32(r0) return } func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) + r0, _, e1 := syscall.SyscallN(procGetModuleFileNameW.Addr(), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2442,7 +2442,7 @@ func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, } func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) { - r1, _, e1 := syscall.Syscall(procGetModuleHandleExW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module))) + r1, _, e1 := syscall.SyscallN(procGetModuleHandleExW.Addr(), uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module))) if r1 == 0 { err = errnoErr(e1) } @@ -2450,7 +2450,7 @@ func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err er } 
func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetNamedPipeClientProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID)), 0) + r1, _, e1 := syscall.SyscallN(procGetNamedPipeClientProcessId.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID))) if r1 == 0 { err = errnoErr(e1) } @@ -2458,7 +2458,7 @@ func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err erro } func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetNamedPipeHandleStateW.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize)) if r1 == 0 { err = errnoErr(e1) } @@ -2466,7 +2466,7 @@ func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, m } func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) + r1, _, e1 := syscall.SyscallN(procGetNamedPipeInfo.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances))) if r1 == 0 { err = errnoErr(e1) } @@ -2474,7 +2474,7 @@ func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint3 } func GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetNamedPipeServerProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID)), 0) + r1, _, e1 := syscall.SyscallN(procGetNamedPipeServerProcessId.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID))) if r1 == 0 { err = errnoErr(e1) } @@ -2486,7 +2486,7 @@ func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wa if wait { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procGetOverlappedResult.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetOverlappedResult.Addr(), uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -2494,7 +2494,7 @@ func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wa } func GetPriorityClass(process Handle) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetPriorityClass.Addr(), 1, uintptr(process), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetPriorityClass.Addr(), uintptr(process)) ret = uint32(r0) if ret == 0 { err = errnoErr(e1) @@ -2512,7 +2512,7 @@ func GetProcAddress(module Handle, procname string) (proc uintptr, err error) { } func _GetProcAddress(module Handle, procname *byte) (proc 
uintptr, err error) { - r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), uintptr(unsafe.Pointer(procname)), 0) + r0, _, e1 := syscall.SyscallN(procGetProcAddress.Addr(), uintptr(module), uintptr(unsafe.Pointer(procname))) proc = uintptr(r0) if proc == 0 { err = errnoErr(e1) @@ -2521,7 +2521,7 @@ func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) { } func GetProcessId(process Handle) (id uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetProcessId.Addr(), 1, uintptr(process), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetProcessId.Addr(), uintptr(process)) id = uint32(r0) if id == 0 { err = errnoErr(e1) @@ -2530,7 +2530,7 @@ func GetProcessId(process Handle) (id uint32, err error) { } func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetProcessPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetProcessPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -2538,7 +2538,7 @@ func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uin } func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetProcessShutdownParameters.Addr(), 2, uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)), 0) + r1, _, e1 := syscall.SyscallN(procGetProcessShutdownParameters.Addr(), uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags))) if r1 == 0 { err = errnoErr(e1) } @@ -2546,7 +2546,7 @@ func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) { } func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0) + r1, _, e1 := syscall.SyscallN(procGetProcessTimes.Addr(), uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime))) if r1 == 0 { err = errnoErr(e1) } @@ -2554,12 +2554,12 @@ func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, } func GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) { - syscall.Syscall6(procGetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags)), 0, 0) + syscall.SyscallN(procGetProcessWorkingSetSizeEx.Addr(), uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags))) return } func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overlapped **Overlapped, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0) + r1, _, e1 := 
syscall.SyscallN(procGetQueuedCompletionStatus.Addr(), uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout)) if r1 == 0 { err = errnoErr(e1) } @@ -2567,7 +2567,7 @@ func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overl } func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetShortPathNameW.Addr(), 3, uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen)) + r0, _, e1 := syscall.SyscallN(procGetShortPathNameW.Addr(), uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2576,12 +2576,12 @@ func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uin } func getStartupInfo(startupInfo *StartupInfo) { - syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) + syscall.SyscallN(procGetStartupInfoW.Addr(), uintptr(unsafe.Pointer(startupInfo))) return } func GetStdHandle(stdhandle uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procGetStdHandle.Addr(), 1, uintptr(stdhandle), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetStdHandle.Addr(), uintptr(stdhandle)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -2590,7 +2590,7 @@ func GetStdHandle(stdhandle uint32) (handle Handle, err error) { } func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetSystemDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + r0, _, e1 := syscall.SyscallN(procGetSystemDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen)) len = uint32(r0) if len == 0 { err = errnoErr(e1) @@ -2599,7 +2599,7 @@ func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { } func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetSystemPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetSystemPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -2607,17 +2607,17 @@ func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint } func GetSystemTimeAsFileTime(time *Filetime) { - syscall.Syscall(procGetSystemTimeAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) + syscall.SyscallN(procGetSystemTimeAsFileTime.Addr(), uintptr(unsafe.Pointer(time))) return } func GetSystemTimePreciseAsFileTime(time *Filetime) { - syscall.Syscall(procGetSystemTimePreciseAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) + syscall.SyscallN(procGetSystemTimePreciseAsFileTime.Addr(), uintptr(unsafe.Pointer(time))) return } func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetSystemWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + r0, _, e1 := syscall.SyscallN(procGetSystemWindowsDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen)) len = uint32(r0) if len == 0 { err = errnoErr(e1) @@ -2626,7 +2626,7 @@ func getSystemWindowsDirectory(dir *uint16, dirLen 
uint32) (len uint32, err erro } func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetTempPathW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + r0, _, e1 := syscall.SyscallN(procGetTempPathW.Addr(), uintptr(buflen), uintptr(unsafe.Pointer(buf))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2635,7 +2635,7 @@ func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) { } func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetThreadPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetThreadPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -2643,13 +2643,13 @@ func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint } func getTickCount64() (ms uint64) { - r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetTickCount64.Addr()) ms = uint64(r0) return } func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetTimeZoneInformation.Addr(), 1, uintptr(unsafe.Pointer(tzi)), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetTimeZoneInformation.Addr(), uintptr(unsafe.Pointer(tzi))) rc = uint32(r0) if rc == 0xffffffff { err = errnoErr(e1) @@ -2658,7 +2658,7 @@ func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) { } func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetUserPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetUserPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -2666,7 +2666,7 @@ func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16 } func GetVersion() (ver uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetVersion.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetVersion.Addr()) ver = uint32(r0) if ver == 0 { err = errnoErr(e1) @@ -2675,7 +2675,7 @@ func GetVersion() (ver uint32, err error) { } func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetVolumeInformationByHandleW.Addr(), 8, uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) + r1, _, e1 := syscall.SyscallN(procGetVolumeInformationByHandleW.Addr(), uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), 
uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize)) if r1 == 0 { err = errnoErr(e1) } @@ -2683,7 +2683,7 @@ func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeN } func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetVolumeInformationW.Addr(), 8, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) + r1, _, e1 := syscall.SyscallN(procGetVolumeInformationW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize)) if r1 == 0 { err = errnoErr(e1) } @@ -2691,7 +2691,7 @@ func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volume } func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetVolumeNameForVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength)) + r1, _, e1 := syscall.SyscallN(procGetVolumeNameForVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength)) if r1 == 0 { err = errnoErr(e1) } @@ -2699,7 +2699,7 @@ func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint } func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetVolumePathNameW.Addr(), 3, uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength)) + r1, _, e1 := syscall.SyscallN(procGetVolumePathNameW.Addr(), uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength)) if r1 == 0 { err = errnoErr(e1) } @@ -2707,7 +2707,7 @@ func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength ui } func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetVolumePathNamesForVolumeNameW.Addr(), 4, uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetVolumePathNamesForVolumeNameW.Addr(), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength))) if r1 == 0 { err = errnoErr(e1) } @@ -2715,7 +2715,7 @@ func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16 } func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + r0, _, e1 
:= syscall.SyscallN(procGetWindowsDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen)) len = uint32(r0) if len == 0 { err = errnoErr(e1) @@ -2724,7 +2724,7 @@ func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { } func initializeProcThreadAttributeList(attrlist *ProcThreadAttributeList, attrcount uint32, flags uint32, size *uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procInitializeProcThreadAttributeList.Addr(), 4, uintptr(unsafe.Pointer(attrlist)), uintptr(attrcount), uintptr(flags), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procInitializeProcThreadAttributeList.Addr(), uintptr(unsafe.Pointer(attrlist)), uintptr(attrcount), uintptr(flags), uintptr(unsafe.Pointer(size))) if r1 == 0 { err = errnoErr(e1) } @@ -2736,7 +2736,7 @@ func IsWow64Process(handle Handle, isWow64 *bool) (err error) { if *isWow64 { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procIsWow64Process.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(&_p0)), 0) + r1, _, e1 := syscall.SyscallN(procIsWow64Process.Addr(), uintptr(handle), uintptr(unsafe.Pointer(&_p0))) *isWow64 = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -2749,7 +2749,7 @@ func IsWow64Process2(handle Handle, processMachine *uint16, nativeMachine *uint1 if err != nil { return } - r1, _, e1 := syscall.Syscall(procIsWow64Process2.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine))) + r1, _, e1 := syscall.SyscallN(procIsWow64Process2.Addr(), uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine))) if r1 == 0 { err = errnoErr(e1) } @@ -2766,7 +2766,7 @@ func LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, e } func _LoadLibraryEx(libname *uint16, zero Handle, flags uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadLibraryExW.Addr(), 3, uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags)) + r0, _, e1 := syscall.SyscallN(procLoadLibraryExW.Addr(), uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2784,7 +2784,7 @@ func LoadLibrary(libname string) (handle Handle, err error) { } func _LoadLibrary(libname *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadLibraryW.Addr(), 1, uintptr(unsafe.Pointer(libname)), 0, 0) + r0, _, e1 := syscall.SyscallN(procLoadLibraryW.Addr(), uintptr(unsafe.Pointer(libname))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2793,7 +2793,7 @@ func _LoadLibrary(libname *uint16) (handle Handle, err error) { } func LoadResource(module Handle, resInfo Handle) (resData Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) + r0, _, e1 := syscall.SyscallN(procLoadResource.Addr(), uintptr(module), uintptr(resInfo)) resData = Handle(r0) if resData == 0 { err = errnoErr(e1) @@ -2802,7 +2802,7 @@ func LoadResource(module Handle, resInfo Handle) (resData Handle, err error) { } func LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) { - r0, _, e1 := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(length), 0) + r0, _, e1 := syscall.SyscallN(procLocalAlloc.Addr(), uintptr(flags), uintptr(length)) ptr = uintptr(r0) if ptr == 0 { err = errnoErr(e1) @@ -2811,7 +2811,7 @@ func LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) { } func LocalFree(hmem Handle) (handle Handle, err error) { - r0, _, e1 := 
syscall.Syscall(procLocalFree.Addr(), 1, uintptr(hmem), 0, 0) + r0, _, e1 := syscall.SyscallN(procLocalFree.Addr(), uintptr(hmem)) handle = Handle(r0) if handle != 0 { err = errnoErr(e1) @@ -2820,7 +2820,7 @@ func LocalFree(hmem Handle) (handle Handle, err error) { } func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) + r1, _, e1 := syscall.SyscallN(procLockFileEx.Addr(), uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -2828,7 +2828,7 @@ func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, byt } func LockResource(resData Handle) (addr uintptr, err error) { - r0, _, e1 := syscall.Syscall(procLockResource.Addr(), 1, uintptr(resData), 0, 0) + r0, _, e1 := syscall.SyscallN(procLockResource.Addr(), uintptr(resData)) addr = uintptr(r0) if addr == 0 { err = errnoErr(e1) @@ -2837,7 +2837,7 @@ func LockResource(resData Handle) (addr uintptr, err error) { } func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) { - r0, _, e1 := syscall.Syscall6(procMapViewOfFile.Addr(), 5, uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length), 0) + r0, _, e1 := syscall.SyscallN(procMapViewOfFile.Addr(), uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length)) addr = uintptr(r0) if addr == 0 { err = errnoErr(e1) @@ -2846,7 +2846,7 @@ func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow ui } func Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procModule32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0) + r1, _, e1 := syscall.SyscallN(procModule32FirstW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -2854,7 +2854,7 @@ func Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { } func Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procModule32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0) + r1, _, e1 := syscall.SyscallN(procModule32NextW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -2862,7 +2862,7 @@ func Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { } func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procMoveFileExW.Addr(), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -2870,7 +2870,7 @@ func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { } func MoveFile(from *uint16, to *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procMoveFileW.Addr(), 2, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), 0) + r1, _, e1 := syscall.SyscallN(procMoveFileW.Addr(), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to))) if r1 == 0 { err = errnoErr(e1) } @@ -2878,7 
+2878,7 @@ func MoveFile(from *uint16, to *uint16) (err error) { } func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) { - r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) + r0, _, e1 := syscall.SyscallN(procMultiByteToWideChar.Addr(), uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) nwrite = int32(r0) if nwrite == 0 { err = errnoErr(e1) @@ -2891,7 +2891,7 @@ func OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle H if inheritHandle { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procOpenEventW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + r0, _, e1 := syscall.SyscallN(procOpenEventW.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2904,7 +2904,7 @@ func OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle H if inheritHandle { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procOpenMutexW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + r0, _, e1 := syscall.SyscallN(procOpenMutexW.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2917,7 +2917,7 @@ func OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (ha if inheritHandle { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(processId)) + r0, _, e1 := syscall.SyscallN(procOpenProcess.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(processId)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2930,7 +2930,7 @@ func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (hand if inheritHandle { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procOpenThread.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(threadId)) + r0, _, e1 := syscall.SyscallN(procOpenThread.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(threadId)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2939,7 +2939,7 @@ func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (hand } func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procPostQueuedCompletionStatus.Addr(), 4, uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)), 0, 0) + r1, _, e1 := syscall.SyscallN(procPostQueuedCompletionStatus.Addr(), uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -2947,7 +2947,7 @@ func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overla } func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procProcess32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) + r1, _, e1 := syscall.SyscallN(procProcess32FirstW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(procEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -2955,7 +2955,7 @@ func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) { } func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err 
error) { - r1, _, e1 := syscall.Syscall(procProcess32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) + r1, _, e1 := syscall.SyscallN(procProcess32NextW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(procEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -2963,7 +2963,7 @@ func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { } func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procProcessIdToSessionId.Addr(), 2, uintptr(pid), uintptr(unsafe.Pointer(sessionid)), 0) + r1, _, e1 := syscall.SyscallN(procProcessIdToSessionId.Addr(), uintptr(pid), uintptr(unsafe.Pointer(sessionid))) if r1 == 0 { err = errnoErr(e1) } @@ -2971,7 +2971,7 @@ func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) { } func PulseEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procPulseEvent.Addr(), 1, uintptr(event), 0, 0) + r1, _, e1 := syscall.SyscallN(procPulseEvent.Addr(), uintptr(event)) if r1 == 0 { err = errnoErr(e1) } @@ -2979,7 +2979,7 @@ func PulseEvent(event Handle) (err error) { } func PurgeComm(handle Handle, dwFlags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procPurgeComm.Addr(), 2, uintptr(handle), uintptr(dwFlags), 0) + r1, _, e1 := syscall.SyscallN(procPurgeComm.Addr(), uintptr(handle), uintptr(dwFlags)) if r1 == 0 { err = errnoErr(e1) } @@ -2987,7 +2987,7 @@ func PurgeComm(handle Handle, dwFlags uint32) (err error) { } func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) + r0, _, e1 := syscall.SyscallN(procQueryDosDeviceW.Addr(), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2996,7 +2996,7 @@ func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint3 } func QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryFullProcessImageNameW.Addr(), 4, uintptr(proc), uintptr(flags), uintptr(unsafe.Pointer(exeName)), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procQueryFullProcessImageNameW.Addr(), uintptr(proc), uintptr(flags), uintptr(unsafe.Pointer(exeName)), uintptr(unsafe.Pointer(size))) if r1 == 0 { err = errnoErr(e1) } @@ -3004,7 +3004,7 @@ func QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size } func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobObjectInformation uintptr, JobObjectInformationLength uint32, retlen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen)), 0) + r1, _, e1 := syscall.SyscallN(procQueryInformationJobObject.Addr(), uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen))) if r1 == 0 { err = errnoErr(e1) } @@ -3012,7 +3012,7 @@ func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobO } func ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) { - r1, _, e1 := syscall.Syscall6(procReadConsoleW.Addr(), 5, uintptr(console), 
uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)), 0) + r1, _, e1 := syscall.SyscallN(procReadConsoleW.Addr(), uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl))) if r1 == 0 { err = errnoErr(e1) } @@ -3024,7 +3024,7 @@ func ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree if watchSubTree { _p0 = 1 } - r1, _, e1 := syscall.Syscall9(procReadDirectoryChangesW.Addr(), 8, uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine), 0) + r1, _, e1 := syscall.SyscallN(procReadDirectoryChangesW.Addr(), uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) if r1 == 0 { err = errnoErr(e1) } @@ -3036,7 +3036,7 @@ func readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) ( if len(buf) > 0 { _p0 = &buf[0] } - r1, _, e1 := syscall.Syscall6(procReadFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procReadFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3044,7 +3044,7 @@ func readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) ( } func ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesRead *uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procReadProcessMemory.Addr(), 5, uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesRead)), 0) + r1, _, e1 := syscall.SyscallN(procReadProcessMemory.Addr(), uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesRead))) if r1 == 0 { err = errnoErr(e1) } @@ -3052,7 +3052,7 @@ func ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size u } func ReleaseMutex(mutex Handle) (err error) { - r1, _, e1 := syscall.Syscall(procReleaseMutex.Addr(), 1, uintptr(mutex), 0, 0) + r1, _, e1 := syscall.SyscallN(procReleaseMutex.Addr(), uintptr(mutex)) if r1 == 0 { err = errnoErr(e1) } @@ -3060,7 +3060,7 @@ func ReleaseMutex(mutex Handle) (err error) { } func RemoveDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procRemoveDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procRemoveDirectoryW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -3068,7 +3068,7 @@ func RemoveDirectory(path *uint16) (err error) { } func RemoveDllDirectory(cookie uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procRemoveDllDirectory.Addr(), 1, uintptr(cookie), 0, 0) + r1, _, e1 := syscall.SyscallN(procRemoveDllDirectory.Addr(), uintptr(cookie)) if r1 == 0 { err = errnoErr(e1) } @@ -3076,7 +3076,7 @@ func RemoveDllDirectory(cookie uintptr) (err error) { } func ResetEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) + r1, _, e1 := syscall.SyscallN(procResetEvent.Addr(), uintptr(event)) if r1 == 0 { err = 
errnoErr(e1) } @@ -3084,7 +3084,7 @@ func ResetEvent(event Handle) (err error) { } func resizePseudoConsole(pconsole Handle, size uint32) (hr error) { - r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(pconsole), uintptr(size), 0) + r0, _, _ := syscall.SyscallN(procResizePseudoConsole.Addr(), uintptr(pconsole), uintptr(size)) if r0 != 0 { hr = syscall.Errno(r0) } @@ -3092,7 +3092,7 @@ func resizePseudoConsole(pconsole Handle, size uint32) (hr error) { } func ResumeThread(thread Handle) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0) + r0, _, e1 := syscall.SyscallN(procResumeThread.Addr(), uintptr(thread)) ret = uint32(r0) if ret == 0xffffffff { err = errnoErr(e1) @@ -3101,7 +3101,7 @@ func ResumeThread(thread Handle) (ret uint32, err error) { } func SetCommBreak(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommBreak.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetCommBreak.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -3109,7 +3109,7 @@ func SetCommBreak(handle Handle) (err error) { } func SetCommMask(handle Handle, dwEvtMask uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommMask.Addr(), 2, uintptr(handle), uintptr(dwEvtMask), 0) + r1, _, e1 := syscall.SyscallN(procSetCommMask.Addr(), uintptr(handle), uintptr(dwEvtMask)) if r1 == 0 { err = errnoErr(e1) } @@ -3117,7 +3117,7 @@ func SetCommMask(handle Handle, dwEvtMask uint32) (err error) { } func SetCommState(handle Handle, lpDCB *DCB) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + r1, _, e1 := syscall.SyscallN(procSetCommState.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpDCB))) if r1 == 0 { err = errnoErr(e1) } @@ -3125,7 +3125,7 @@ func SetCommState(handle Handle, lpDCB *DCB) (err error) { } func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) + r1, _, e1 := syscall.SyscallN(procSetCommTimeouts.Addr(), uintptr(handle), uintptr(unsafe.Pointer(timeouts))) if r1 == 0 { err = errnoErr(e1) } @@ -3133,7 +3133,7 @@ func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { } func SetConsoleCP(cp uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleCP.Addr(), 1, uintptr(cp), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetConsoleCP.Addr(), uintptr(cp)) if r1 == 0 { err = errnoErr(e1) } @@ -3141,7 +3141,7 @@ func SetConsoleCP(cp uint32) (err error) { } func setConsoleCursorPosition(console Handle, position uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0) + r1, _, e1 := syscall.SyscallN(procSetConsoleCursorPosition.Addr(), uintptr(console), uintptr(position)) if r1 == 0 { err = errnoErr(e1) } @@ -3149,7 +3149,7 @@ func setConsoleCursorPosition(console Handle, position uint32) (err error) { } func SetConsoleMode(console Handle, mode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(console), uintptr(mode), 0) + r1, _, e1 := syscall.SyscallN(procSetConsoleMode.Addr(), uintptr(console), uintptr(mode)) if r1 == 0 { err = errnoErr(e1) } @@ -3157,7 +3157,7 @@ func SetConsoleMode(console Handle, mode uint32) (err error) { } func SetConsoleOutputCP(cp uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleOutputCP.Addr(), 
1, uintptr(cp), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetConsoleOutputCP.Addr(), uintptr(cp)) if r1 == 0 { err = errnoErr(e1) } @@ -3165,7 +3165,7 @@ func SetConsoleOutputCP(cp uint32) (err error) { } func SetCurrentDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetCurrentDirectoryW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -3173,7 +3173,7 @@ func SetCurrentDirectory(path *uint16) (err error) { } func SetDefaultDllDirectories(directoryFlags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetDefaultDllDirectories.Addr(), 1, uintptr(directoryFlags), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetDefaultDllDirectories.Addr(), uintptr(directoryFlags)) if r1 == 0 { err = errnoErr(e1) } @@ -3190,7 +3190,7 @@ func SetDllDirectory(path string) (err error) { } func _SetDllDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetDllDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetDllDirectoryW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -3198,7 +3198,7 @@ func _SetDllDirectory(path *uint16) (err error) { } func SetEndOfFile(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetEndOfFile.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetEndOfFile.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -3206,7 +3206,7 @@ func SetEndOfFile(handle Handle) (err error) { } func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) + r1, _, e1 := syscall.SyscallN(procSetEnvironmentVariableW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value))) if r1 == 0 { err = errnoErr(e1) } @@ -3214,13 +3214,13 @@ func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { } func SetErrorMode(mode uint32) (ret uint32) { - r0, _, _ := syscall.Syscall(procSetErrorMode.Addr(), 1, uintptr(mode), 0, 0) + r0, _, _ := syscall.SyscallN(procSetErrorMode.Addr(), uintptr(mode)) ret = uint32(r0) return } func SetEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetEvent.Addr(), 1, uintptr(event), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetEvent.Addr(), uintptr(event)) if r1 == 0 { err = errnoErr(e1) } @@ -3228,7 +3228,7 @@ func SetEvent(event Handle) (err error) { } func SetFileAttributes(name *uint16, attrs uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileAttributesW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(attrs), 0) + r1, _, e1 := syscall.SyscallN(procSetFileAttributesW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(attrs)) if r1 == 0 { err = errnoErr(e1) } @@ -3236,7 +3236,7 @@ func SetFileAttributes(name *uint16, attrs uint32) (err error) { } func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(handle), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procSetFileCompletionNotificationModes.Addr(), uintptr(handle), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3244,7 +3244,7 @@ func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) } func SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inBufferLen uint32) (err error) { - r1, 
_, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetFileInformationByHandle.Addr(), uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen)) if r1 == 0 { err = errnoErr(e1) } @@ -3252,7 +3252,7 @@ func SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inB } func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) { - r0, _, e1 := syscall.Syscall6(procSetFilePointer.Addr(), 4, uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence), 0, 0) + r0, _, e1 := syscall.SyscallN(procSetFilePointer.Addr(), uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence)) newlowoffset = uint32(r0) if newlowoffset == 0xffffffff { err = errnoErr(e1) @@ -3261,7 +3261,7 @@ func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence } func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetFileTime.Addr(), uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime))) if r1 == 0 { err = errnoErr(e1) } @@ -3269,7 +3269,7 @@ func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim } func SetFileValidData(handle Handle, validDataLength int64) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) + r1, _, e1 := syscall.SyscallN(procSetFileValidData.Addr(), uintptr(handle), uintptr(validDataLength)) if r1 == 0 { err = errnoErr(e1) } @@ -3277,7 +3277,7 @@ func SetFileValidData(handle Handle, validDataLength int64) (err error) { } func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procSetHandleInformation.Addr(), uintptr(handle), uintptr(mask), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3285,7 +3285,7 @@ func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) } func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) { - r0, _, e1 := syscall.Syscall6(procSetInformationJobObject.Addr(), 4, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), 0, 0) + r0, _, e1 := syscall.SyscallN(procSetInformationJobObject.Addr(), uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength)) ret = int(r0) if ret == 0 { err = errnoErr(e1) @@ -3294,7 +3294,7 @@ func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobOb } func SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetNamedPipeHandleState.Addr(), 4, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(maxCollectionCount)), 
uintptr(unsafe.Pointer(collectDataTimeout)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetNamedPipeHandleState.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout))) if r1 == 0 { err = errnoErr(e1) } @@ -3302,7 +3302,7 @@ func SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uin } func SetPriorityClass(process Handle, priorityClass uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetPriorityClass.Addr(), 2, uintptr(process), uintptr(priorityClass), 0) + r1, _, e1 := syscall.SyscallN(procSetPriorityClass.Addr(), uintptr(process), uintptr(priorityClass)) if r1 == 0 { err = errnoErr(e1) } @@ -3314,7 +3314,7 @@ func SetProcessPriorityBoost(process Handle, disable bool) (err error) { if disable { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procSetProcessPriorityBoost.Addr(), 2, uintptr(process), uintptr(_p0), 0) + r1, _, e1 := syscall.SyscallN(procSetProcessPriorityBoost.Addr(), uintptr(process), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -3322,7 +3322,7 @@ func SetProcessPriorityBoost(process Handle, disable bool) (err error) { } func SetProcessShutdownParameters(level uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetProcessShutdownParameters.Addr(), 2, uintptr(level), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procSetProcessShutdownParameters.Addr(), uintptr(level), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3330,7 +3330,7 @@ func SetProcessShutdownParameters(level uint32, flags uint32) (err error) { } func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetProcessWorkingSetSizeEx.Addr(), uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3338,7 +3338,7 @@ func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr } func SetStdHandle(stdhandle uint32, handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0) + r1, _, e1 := syscall.SyscallN(procSetStdHandle.Addr(), uintptr(stdhandle), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -3346,7 +3346,7 @@ func SetStdHandle(stdhandle uint32, handle Handle) (err error) { } func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetVolumeLabelW.Addr(), 2, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)), 0) + r1, _, e1 := syscall.SyscallN(procSetVolumeLabelW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName))) if r1 == 0 { err = errnoErr(e1) } @@ -3354,7 +3354,7 @@ func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) { } func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetVolumeMountPointW.Addr(), 2, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), 0) + r1, _, e1 := syscall.SyscallN(procSetVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName))) if r1 == 0 { err = errnoErr(e1) } @@ -3362,7 +3362,7 @@ func 
SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err erro } func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetupComm.Addr(), 3, uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue)) + r1, _, e1 := syscall.SyscallN(procSetupComm.Addr(), uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue)) if r1 == 0 { err = errnoErr(e1) } @@ -3370,7 +3370,7 @@ func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) { } func SizeofResource(module Handle, resInfo Handle) (size uint32, err error) { - r0, _, e1 := syscall.Syscall(procSizeofResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) + r0, _, e1 := syscall.SyscallN(procSizeofResource.Addr(), uintptr(module), uintptr(resInfo)) size = uint32(r0) if size == 0 { err = errnoErr(e1) @@ -3383,13 +3383,13 @@ func SleepEx(milliseconds uint32, alertable bool) (ret uint32) { if alertable { _p0 = 1 } - r0, _, _ := syscall.Syscall(procSleepEx.Addr(), 2, uintptr(milliseconds), uintptr(_p0), 0) + r0, _, _ := syscall.SyscallN(procSleepEx.Addr(), uintptr(milliseconds), uintptr(_p0)) ret = uint32(r0) return } func TerminateJobObject(job Handle, exitCode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procTerminateJobObject.Addr(), 2, uintptr(job), uintptr(exitCode), 0) + r1, _, e1 := syscall.SyscallN(procTerminateJobObject.Addr(), uintptr(job), uintptr(exitCode)) if r1 == 0 { err = errnoErr(e1) } @@ -3397,7 +3397,7 @@ func TerminateJobObject(job Handle, exitCode uint32) (err error) { } func TerminateProcess(handle Handle, exitcode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0) + r1, _, e1 := syscall.SyscallN(procTerminateProcess.Addr(), uintptr(handle), uintptr(exitcode)) if r1 == 0 { err = errnoErr(e1) } @@ -3405,7 +3405,7 @@ func TerminateProcess(handle Handle, exitcode uint32) (err error) { } func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procThread32First.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + r1, _, e1 := syscall.SyscallN(procThread32First.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -3413,7 +3413,7 @@ func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { } func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procThread32Next.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + r1, _, e1 := syscall.SyscallN(procThread32Next.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -3421,7 +3421,7 @@ func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { } func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procUnlockFileEx.Addr(), uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3429,7 +3429,7 @@ func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint3 } func UnmapViewOfFile(addr uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procUnmapViewOfFile.Addr(), 1, 
uintptr(addr), 0, 0) + r1, _, e1 := syscall.SyscallN(procUnmapViewOfFile.Addr(), uintptr(addr)) if r1 == 0 { err = errnoErr(e1) } @@ -3437,7 +3437,7 @@ func UnmapViewOfFile(addr uintptr) (err error) { } func updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, attr uintptr, value unsafe.Pointer, size uintptr, prevvalue unsafe.Pointer, returnedsize *uintptr) (err error) { - r1, _, e1 := syscall.Syscall9(procUpdateProcThreadAttribute.Addr(), 7, uintptr(unsafe.Pointer(attrlist)), uintptr(flags), uintptr(attr), uintptr(value), uintptr(size), uintptr(prevvalue), uintptr(unsafe.Pointer(returnedsize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procUpdateProcThreadAttribute.Addr(), uintptr(unsafe.Pointer(attrlist)), uintptr(flags), uintptr(attr), uintptr(value), uintptr(size), uintptr(prevvalue), uintptr(unsafe.Pointer(returnedsize))) if r1 == 0 { err = errnoErr(e1) } @@ -3445,7 +3445,7 @@ func updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, } func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) { - r0, _, e1 := syscall.Syscall6(procVirtualAlloc.Addr(), 4, uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect), 0, 0) + r0, _, e1 := syscall.SyscallN(procVirtualAlloc.Addr(), uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect)) value = uintptr(r0) if value == 0 { err = errnoErr(e1) @@ -3454,7 +3454,7 @@ func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint3 } func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualFree.Addr(), 3, uintptr(address), uintptr(size), uintptr(freetype)) + r1, _, e1 := syscall.SyscallN(procVirtualFree.Addr(), uintptr(address), uintptr(size), uintptr(freetype)) if r1 == 0 { err = errnoErr(e1) } @@ -3462,7 +3462,7 @@ func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) { } func VirtualLock(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualLock.Addr(), 2, uintptr(addr), uintptr(length), 0) + r1, _, e1 := syscall.SyscallN(procVirtualLock.Addr(), uintptr(addr), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -3470,7 +3470,7 @@ func VirtualLock(addr uintptr, length uintptr) (err error) { } func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procVirtualProtect.Addr(), 4, uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)), 0, 0) + r1, _, e1 := syscall.SyscallN(procVirtualProtect.Addr(), uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect))) if r1 == 0 { err = errnoErr(e1) } @@ -3478,7 +3478,7 @@ func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect } func VirtualProtectEx(process Handle, address uintptr, size uintptr, newProtect uint32, oldProtect *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procVirtualProtectEx.Addr(), 5, uintptr(process), uintptr(address), uintptr(size), uintptr(newProtect), uintptr(unsafe.Pointer(oldProtect)), 0) + r1, _, e1 := syscall.SyscallN(procVirtualProtectEx.Addr(), uintptr(process), uintptr(address), uintptr(size), uintptr(newProtect), uintptr(unsafe.Pointer(oldProtect))) if r1 == 0 { err = errnoErr(e1) } @@ -3486,7 +3486,7 @@ func VirtualProtectEx(process Handle, address uintptr, size uintptr, newProtect } func VirtualQuery(address uintptr, buffer *MemoryBasicInformation, 
length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualQuery.Addr(), 3, uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length)) + r1, _, e1 := syscall.SyscallN(procVirtualQuery.Addr(), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -3494,7 +3494,7 @@ func VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintpt } func VirtualQueryEx(process Handle, address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procVirtualQueryEx.Addr(), 4, uintptr(process), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length), 0, 0) + r1, _, e1 := syscall.SyscallN(procVirtualQueryEx.Addr(), uintptr(process), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -3502,7 +3502,7 @@ func VirtualQueryEx(process Handle, address uintptr, buffer *MemoryBasicInformat } func VirtualUnlock(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualUnlock.Addr(), 2, uintptr(addr), uintptr(length), 0) + r1, _, e1 := syscall.SyscallN(procVirtualUnlock.Addr(), uintptr(addr), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -3510,13 +3510,13 @@ func VirtualUnlock(addr uintptr, length uintptr) (err error) { } func WTSGetActiveConsoleSessionId() (sessionID uint32) { - r0, _, _ := syscall.Syscall(procWTSGetActiveConsoleSessionId.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procWTSGetActiveConsoleSessionId.Addr()) sessionID = uint32(r0) return } func WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procWaitCommEvent.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped))) + r1, _, e1 := syscall.SyscallN(procWaitCommEvent.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3528,7 +3528,7 @@ func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMil if waitAll { _p0 = 1 } - r0, _, e1 := syscall.Syscall6(procWaitForMultipleObjects.Addr(), 4, uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds), 0, 0) + r0, _, e1 := syscall.SyscallN(procWaitForMultipleObjects.Addr(), uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds)) event = uint32(r0) if event == 0xffffffff { err = errnoErr(e1) @@ -3537,7 +3537,7 @@ func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMil } func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) { - r0, _, e1 := syscall.Syscall(procWaitForSingleObject.Addr(), 2, uintptr(handle), uintptr(waitMilliseconds), 0) + r0, _, e1 := syscall.SyscallN(procWaitForSingleObject.Addr(), uintptr(handle), uintptr(waitMilliseconds)) event = uint32(r0) if event == 0xffffffff { err = errnoErr(e1) @@ -3546,7 +3546,7 @@ func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, } func WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) { - r1, _, e1 := syscall.Syscall6(procWriteConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)), 0) + r1, _, e1 := syscall.SyscallN(procWriteConsoleW.Addr(), uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), 
uintptr(unsafe.Pointer(reserved))) if r1 == 0 { err = errnoErr(e1) } @@ -3558,7 +3558,7 @@ func writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) if len(buf) > 0 { _p0 = &buf[0] } - r1, _, e1 := syscall.Syscall6(procWriteFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procWriteFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3566,7 +3566,7 @@ func writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) } func WriteProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesWritten *uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procWriteProcessMemory.Addr(), 5, uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesWritten)), 0) + r1, _, e1 := syscall.SyscallN(procWriteProcessMemory.Addr(), uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesWritten))) if r1 == 0 { err = errnoErr(e1) } @@ -3574,7 +3574,7 @@ func WriteProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size } func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall9(procAcceptEx.Addr(), 8, uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procAcceptEx.Addr(), uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3582,12 +3582,12 @@ func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32 } func GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) { - syscall.Syscall9(procGetAcceptExSockaddrs.Addr(), 8, uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)), 0) + syscall.SyscallN(procGetAcceptExSockaddrs.Addr(), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen))) return } func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procTransmitFile.Addr(), 7, uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags), 0, 0) + r1, _, e1 := syscall.SyscallN(procTransmitFile.Addr(), uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags)) if r1 == 0 { err = 
errnoErr(e1) } @@ -3595,7 +3595,7 @@ func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint } func NetApiBufferFree(buf *byte) (neterr error) { - r0, _, _ := syscall.Syscall(procNetApiBufferFree.Addr(), 1, uintptr(unsafe.Pointer(buf)), 0, 0) + r0, _, _ := syscall.SyscallN(procNetApiBufferFree.Addr(), uintptr(unsafe.Pointer(buf))) if r0 != 0 { neterr = syscall.Errno(r0) } @@ -3603,7 +3603,7 @@ func NetApiBufferFree(buf *byte) (neterr error) { } func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) { - r0, _, _ := syscall.Syscall(procNetGetJoinInformation.Addr(), 3, uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType))) + r0, _, _ := syscall.SyscallN(procNetGetJoinInformation.Addr(), uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType))) if r0 != 0 { neterr = syscall.Errno(r0) } @@ -3611,7 +3611,7 @@ func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (nete } func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) { - r0, _, _ := syscall.Syscall9(procNetUserEnum.Addr(), 8, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)), 0) + r0, _, _ := syscall.SyscallN(procNetUserEnum.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle))) if r0 != 0 { neterr = syscall.Errno(r0) } @@ -3619,7 +3619,7 @@ func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, pr } func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) { - r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0) + r0, _, _ := syscall.SyscallN(procNetUserGetInfo.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf))) if r0 != 0 { neterr = syscall.Errno(r0) } @@ -3627,7 +3627,7 @@ func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **by } func NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, allocationSize *int64, attributes uint32, share uint32, disposition uint32, options uint32, eabuffer uintptr, ealength uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(attributes), uintptr(share), uintptr(disposition), uintptr(options), uintptr(eabuffer), uintptr(ealength), 0) + r0, _, _ := syscall.SyscallN(procNtCreateFile.Addr(), uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(attributes), uintptr(share), uintptr(disposition), uintptr(options), uintptr(eabuffer), uintptr(ealength)) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3635,7 +3635,7 @@ func NtCreateFile(handle *Handle, 
access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO } func NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (ntstatus error) { - r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, _ := syscall.SyscallN(procNtCreateNamedPipeFile.Addr(), uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3643,7 +3643,7 @@ func NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, i } func NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32, retLen *uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), uintptr(unsafe.Pointer(retLen)), 0) + r0, _, _ := syscall.SyscallN(procNtQueryInformationProcess.Addr(), uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), uintptr(unsafe.Pointer(retLen))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3651,7 +3651,7 @@ func NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe } func NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32, retLen *uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen), uintptr(unsafe.Pointer(retLen)), 0, 0) + r0, _, _ := syscall.SyscallN(procNtQuerySystemInformation.Addr(), uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen), uintptr(unsafe.Pointer(retLen))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3659,7 +3659,7 @@ func NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInf } func NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte, inBufferLen uint32, class uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), uintptr(class), 0) + r0, _, _ := syscall.SyscallN(procNtSetInformationFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), uintptr(class)) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3667,7 +3667,7 @@ func NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte, } func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procNtSetInformationProcess.Addr(), 4, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), 0, 0) + r0, _, _ := syscall.SyscallN(procNtSetInformationProcess.Addr(), 
uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen)) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3675,7 +3675,7 @@ func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.P } func NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall(procNtSetSystemInformation.Addr(), 3, uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen)) + r0, _, _ := syscall.SyscallN(procNtSetSystemInformation.Addr(), uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen)) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3683,13 +3683,13 @@ func NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoL } func RtlAddFunctionTable(functionTable *RUNTIME_FUNCTION, entryCount uint32, baseAddress uintptr) (ret bool) { - r0, _, _ := syscall.Syscall(procRtlAddFunctionTable.Addr(), 3, uintptr(unsafe.Pointer(functionTable)), uintptr(entryCount), uintptr(baseAddress)) + r0, _, _ := syscall.SyscallN(procRtlAddFunctionTable.Addr(), uintptr(unsafe.Pointer(functionTable)), uintptr(entryCount), uintptr(baseAddress)) ret = r0 != 0 return } func RtlDefaultNpAcl(acl **ACL) (ntstatus error) { - r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(acl)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlDefaultNpAcl.Addr(), uintptr(unsafe.Pointer(acl))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3697,13 +3697,13 @@ func RtlDefaultNpAcl(acl **ACL) (ntstatus error) { } func RtlDeleteFunctionTable(functionTable *RUNTIME_FUNCTION) (ret bool) { - r0, _, _ := syscall.Syscall(procRtlDeleteFunctionTable.Addr(), 1, uintptr(unsafe.Pointer(functionTable)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlDeleteFunctionTable.Addr(), uintptr(unsafe.Pointer(functionTable))) ret = r0 != 0 return } func RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlDosPathNameToNtPathName_U_WithStatus.Addr(), uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3711,7 +3711,7 @@ func RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFile } func RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procRtlDosPathNameToRelativeNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlDosPathNameToRelativeNtPathName_U_WithStatus.Addr(), uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3719,18 +3719,18 @@ func RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString } func RtlGetCurrentPeb() (peb *PEB) { - r0, _, _ := syscall.Syscall(procRtlGetCurrentPeb.Addr(), 0, 0, 0, 0) + r0, _, _ := 
syscall.SyscallN(procRtlGetCurrentPeb.Addr()) peb = (*PEB)(unsafe.Pointer(r0)) return } func rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) { - syscall.Syscall(procRtlGetNtVersionNumbers.Addr(), 3, uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber))) + syscall.SyscallN(procRtlGetNtVersionNumbers.Addr(), uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber))) return } func rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) { - r0, _, _ := syscall.Syscall(procRtlGetVersion.Addr(), 1, uintptr(unsafe.Pointer(info)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlGetVersion.Addr(), uintptr(unsafe.Pointer(info))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3738,23 +3738,23 @@ func rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) { } func RtlInitString(destinationString *NTString, sourceString *byte) { - syscall.Syscall(procRtlInitString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0) + syscall.SyscallN(procRtlInitString.Addr(), uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString))) return } func RtlInitUnicodeString(destinationString *NTUnicodeString, sourceString *uint16) { - syscall.Syscall(procRtlInitUnicodeString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0) + syscall.SyscallN(procRtlInitUnicodeString.Addr(), uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString))) return } func rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) { - r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(ntstatus), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosErrorNoTeb.Addr(), uintptr(ntstatus)) ret = syscall.Errno(r0) return } func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) { - r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)), 0) + r0, _, _ := syscall.SyscallN(procCLSIDFromString.Addr(), uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -3762,7 +3762,7 @@ func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) { } func coCreateGuid(pguid *GUID) (ret error) { - r0, _, _ := syscall.Syscall(procCoCreateGuid.Addr(), 1, uintptr(unsafe.Pointer(pguid)), 0, 0) + r0, _, _ := syscall.SyscallN(procCoCreateGuid.Addr(), uintptr(unsafe.Pointer(pguid))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -3770,7 +3770,7 @@ func coCreateGuid(pguid *GUID) (ret error) { } func CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable **uintptr) (ret error) { - r0, _, _ := syscall.Syscall6(procCoGetObject.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bindOpts)), uintptr(unsafe.Pointer(guid)), uintptr(unsafe.Pointer(functionTable)), 0, 0) + r0, _, _ := syscall.SyscallN(procCoGetObject.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bindOpts)), uintptr(unsafe.Pointer(guid)), uintptr(unsafe.Pointer(functionTable))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -3778,7 +3778,7 @@ func CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable * } func CoInitializeEx(reserved uintptr, coInit uint32) (ret error) { - r0, _, _ := syscall.Syscall(procCoInitializeEx.Addr(), 2, uintptr(reserved), uintptr(coInit), 0) + r0, _, _ := syscall.SyscallN(procCoInitializeEx.Addr(), 
uintptr(reserved), uintptr(coInit)) if r0 != 0 { ret = syscall.Errno(r0) } @@ -3786,23 +3786,23 @@ func CoInitializeEx(reserved uintptr, coInit uint32) (ret error) { } func CoTaskMemFree(address unsafe.Pointer) { - syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(address), 0, 0) + syscall.SyscallN(procCoTaskMemFree.Addr(), uintptr(address)) return } func CoUninitialize() { - syscall.Syscall(procCoUninitialize.Addr(), 0, 0, 0, 0) + syscall.SyscallN(procCoUninitialize.Addr()) return } func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) { - r0, _, _ := syscall.Syscall(procStringFromGUID2.Addr(), 3, uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax)) + r0, _, _ := syscall.SyscallN(procStringFromGUID2.Addr(), uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax)) chars = int32(r0) return } func EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procEnumProcessModules.Addr(), 4, uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), 0, 0) + r1, _, e1 := syscall.SyscallN(procEnumProcessModules.Addr(), uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -3810,7 +3810,7 @@ func EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uin } func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *uint32, filterFlag uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procEnumProcessModulesEx.Addr(), 5, uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), uintptr(filterFlag), 0) + r1, _, e1 := syscall.SyscallN(procEnumProcessModulesEx.Addr(), uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), uintptr(filterFlag)) if r1 == 0 { err = errnoErr(e1) } @@ -3818,7 +3818,7 @@ func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *u } func enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned))) + r1, _, e1 := syscall.SyscallN(procEnumProcesses.Addr(), uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned))) if r1 == 0 { err = errnoErr(e1) } @@ -3826,7 +3826,7 @@ func enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err } func GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetModuleBaseNameW.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(baseName)), uintptr(size), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetModuleBaseNameW.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(baseName)), uintptr(size)) if r1 == 0 { err = errnoErr(e1) } @@ -3834,7 +3834,7 @@ func GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uin } func GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetModuleFileNameExW.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetModuleFileNameExW.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(filename)), 
uintptr(size)) if r1 == 0 { err = errnoErr(e1) } @@ -3842,7 +3842,7 @@ func GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size u } func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetModuleInformation.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(modinfo)), uintptr(cb), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetModuleInformation.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(modinfo)), uintptr(cb)) if r1 == 0 { err = errnoErr(e1) } @@ -3850,7 +3850,7 @@ func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb } func QueryWorkingSetEx(process Handle, pv uintptr, cb uint32) (err error) { - r1, _, e1 := syscall.Syscall(procQueryWorkingSetEx.Addr(), 3, uintptr(process), uintptr(pv), uintptr(cb)) + r1, _, e1 := syscall.SyscallN(procQueryWorkingSetEx.Addr(), uintptr(process), uintptr(pv), uintptr(cb)) if r1 == 0 { err = errnoErr(e1) } @@ -3862,7 +3862,7 @@ func SubscribeServiceChangeNotifications(service Handle, eventType uint32, callb if ret != nil { return } - r0, _, _ := syscall.Syscall6(procSubscribeServiceChangeNotifications.Addr(), 5, uintptr(service), uintptr(eventType), uintptr(callback), uintptr(callbackCtx), uintptr(unsafe.Pointer(subscription)), 0) + r0, _, _ := syscall.SyscallN(procSubscribeServiceChangeNotifications.Addr(), uintptr(service), uintptr(eventType), uintptr(callback), uintptr(callbackCtx), uintptr(unsafe.Pointer(subscription))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -3874,12 +3874,12 @@ func UnsubscribeServiceChangeNotifications(subscription uintptr) (err error) { if err != nil { return } - syscall.Syscall(procUnsubscribeServiceChangeNotifications.Addr(), 1, uintptr(subscription), 0, 0) + syscall.SyscallN(procUnsubscribeServiceChangeNotifications.Addr(), uintptr(subscription)) return } func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetUserNameExW.Addr(), 3, uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize))) + r1, _, e1 := syscall.SyscallN(procGetUserNameExW.Addr(), uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize))) if r1&0xff == 0 { err = errnoErr(e1) } @@ -3887,7 +3887,7 @@ func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err er } func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procTranslateNameW.Addr(), 5, uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)), 0) + r1, _, e1 := syscall.SyscallN(procTranslateNameW.Addr(), uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize))) if r1&0xff == 0 { err = errnoErr(e1) } @@ -3895,7 +3895,7 @@ func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint } func SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiBuildDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) + r1, _, e1 := syscall.SyscallN(procSetupDiBuildDriverInfoList.Addr(), uintptr(deviceInfoSet), 
uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) if r1 == 0 { err = errnoErr(e1) } @@ -3903,7 +3903,7 @@ func SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa } func SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiCallClassInstaller.Addr(), 3, uintptr(installFunction), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData))) + r1, _, e1 := syscall.SyscallN(procSetupDiCallClassInstaller.Addr(), uintptr(installFunction), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -3911,7 +3911,7 @@ func SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInf } func SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiCancelDriverInfoSearch.Addr(), 1, uintptr(deviceInfoSet), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetupDiCancelDriverInfoSearch.Addr(), uintptr(deviceInfoSet)) if r1 == 0 { err = errnoErr(e1) } @@ -3919,7 +3919,7 @@ func SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) { } func setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGuidListSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiClassGuidsFromNameExW.Addr(), 6, uintptr(unsafe.Pointer(className)), uintptr(unsafe.Pointer(classGuidList)), uintptr(classGuidListSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) + r1, _, e1 := syscall.SyscallN(procSetupDiClassGuidsFromNameExW.Addr(), uintptr(unsafe.Pointer(className)), uintptr(unsafe.Pointer(classGuidList)), uintptr(classGuidListSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) if r1 == 0 { err = errnoErr(e1) } @@ -3927,7 +3927,7 @@ func setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGu } func setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiClassNameFromGuidExW.Addr(), 6, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(className)), uintptr(classNameSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) + r1, _, e1 := syscall.SyscallN(procSetupDiClassNameFromGuidExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(className)), uintptr(classNameSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) if r1 == 0 { err = errnoErr(e1) } @@ -3935,7 +3935,7 @@ func setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSiz } func setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineName *uint16, reserved uintptr) (handle DevInfo, err error) { - r0, _, e1 := syscall.Syscall6(procSetupDiCreateDeviceInfoListExW.Addr(), 4, uintptr(unsafe.Pointer(classGUID)), uintptr(hwndParent), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0) + r0, _, e1 := syscall.SyscallN(procSetupDiCreateDeviceInfoListExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(hwndParent), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) handle = DevInfo(r0) if handle == DevInfo(InvalidHandle) { err = errnoErr(e1) @@ -3944,7 +3944,7 @@ func 
setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineN } func setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUID *GUID, DeviceDescription *uint16, hwndParent uintptr, CreationFlags DICD, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall9(procSetupDiCreateDeviceInfoW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(DeviceName)), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(DeviceDescription)), uintptr(hwndParent), uintptr(CreationFlags), uintptr(unsafe.Pointer(deviceInfoData)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetupDiCreateDeviceInfoW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(DeviceName)), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(DeviceDescription)), uintptr(hwndParent), uintptr(CreationFlags), uintptr(unsafe.Pointer(deviceInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -3952,7 +3952,7 @@ func setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUI } func SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiDestroyDeviceInfoList.Addr(), 1, uintptr(deviceInfoSet), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetupDiDestroyDeviceInfoList.Addr(), uintptr(deviceInfoSet)) if r1 == 0 { err = errnoErr(e1) } @@ -3960,7 +3960,7 @@ func SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) { } func SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiDestroyDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) + r1, _, e1 := syscall.SyscallN(procSetupDiDestroyDriverInfoList.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) if r1 == 0 { err = errnoErr(e1) } @@ -3968,7 +3968,7 @@ func SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfo } func setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiEnumDeviceInfo.Addr(), 3, uintptr(deviceInfoSet), uintptr(memberIndex), uintptr(unsafe.Pointer(deviceInfoData))) + r1, _, e1 := syscall.SyscallN(procSetupDiEnumDeviceInfo.Addr(), uintptr(deviceInfoSet), uintptr(memberIndex), uintptr(unsafe.Pointer(deviceInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -3976,7 +3976,7 @@ func setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfo } func setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex uint32, driverInfoData *DrvInfoData) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiEnumDriverInfoW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType), uintptr(memberIndex), uintptr(unsafe.Pointer(driverInfoData)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiEnumDriverInfoW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType), uintptr(memberIndex), uintptr(unsafe.Pointer(driverInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -3984,7 +3984,7 @@ func setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, d } func setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintptr, Flags DIGCF, deviceInfoSet DevInfo, machineName *uint16, reserved uintptr) (handle DevInfo, err error) { - r0, _, e1 := syscall.Syscall9(procSetupDiGetClassDevsExW.Addr(), 7, 
uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(Enumerator)), uintptr(hwndParent), uintptr(Flags), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0) + r0, _, e1 := syscall.SyscallN(procSetupDiGetClassDevsExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(Enumerator)), uintptr(hwndParent), uintptr(Flags), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) handle = DevInfo(r0) if handle == DevInfo(InvalidHandle) { err = errnoErr(e1) @@ -3993,7 +3993,7 @@ func setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintp } func SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiGetClassInstallParamsW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), uintptr(unsafe.Pointer(requiredSize)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetClassInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), uintptr(unsafe.Pointer(requiredSize))) if r1 == 0 { err = errnoErr(e1) } @@ -4001,7 +4001,7 @@ func SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfo } func setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailData *DevInfoListDetailData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInfoListDetailW.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoSetDetailData)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInfoListDetailW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoSetDetailData))) if r1 == 0 { err = errnoErr(e1) } @@ -4009,7 +4009,7 @@ func setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailDa } func setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) if r1 == 0 { err = errnoErr(e1) } @@ -4017,7 +4017,7 @@ func setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInf } func setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, instanceId *uint16, instanceIdSize uint32, instanceIdRequiredSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiGetDeviceInstanceIdW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(instanceId)), uintptr(instanceIdSize), uintptr(unsafe.Pointer(instanceIdRequiredSize)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInstanceIdW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(instanceId)), uintptr(instanceIdSize), uintptr(unsafe.Pointer(instanceIdRequiredSize))) if r1 == 0 { err = errnoErr(e1) } @@ -4025,7 +4025,7 @@ func setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa } func 
setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, propertyKey *DEVPROPKEY, propertyType *DEVPROPTYPE, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procSetupDiGetDevicePropertyW.Addr(), 8, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDevicePropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -4033,7 +4033,7 @@ func setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyRegDataType *uint32, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procSetupDiGetDeviceRegistryPropertyW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyRegDataType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceRegistryPropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyRegDataType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize))) if r1 == 0 { err = errnoErr(e1) } @@ -4041,7 +4041,7 @@ func setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *Dev } func setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData, driverInfoDetailData *DrvInfoDetailData, driverInfoDetailDataSize uint32, requiredSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiGetDriverInfoDetailW.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)), uintptr(unsafe.Pointer(driverInfoDetailData)), uintptr(driverInfoDetailDataSize), uintptr(unsafe.Pointer(requiredSize))) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDriverInfoDetailW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)), uintptr(unsafe.Pointer(driverInfoDetailData)), uintptr(driverInfoDetailDataSize), uintptr(unsafe.Pointer(requiredSize))) if r1 == 0 { err = errnoErr(e1) } @@ -4049,7 +4049,7 @@ func setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa } func setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetSelectedDevice.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -4057,7 +4057,7 @@ func setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func 
setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) + r1, _, e1 := syscall.SyscallN(procSetupDiGetSelectedDriverW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -4065,7 +4065,7 @@ func setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (key Handle, err error) { - r0, _, e1 := syscall.Syscall6(procSetupDiOpenDevRegKey.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(Scope), uintptr(HwProfile), uintptr(KeyType), uintptr(samDesired)) + r0, _, e1 := syscall.SyscallN(procSetupDiOpenDevRegKey.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(Scope), uintptr(HwProfile), uintptr(KeyType), uintptr(samDesired)) key = Handle(r0) if key == InvalidHandle { err = errnoErr(e1) @@ -4074,7 +4074,7 @@ func SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Sc } func SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiSetClassInstallParamsW.Addr(), 4, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetupDiSetClassInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize)) if r1 == 0 { err = errnoErr(e1) } @@ -4082,7 +4082,7 @@ func SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfo } func SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiSetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) + r1, _, e1 := syscall.SyscallN(procSetupDiSetDeviceInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) if r1 == 0 { err = errnoErr(e1) } @@ -4090,7 +4090,7 @@ func SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInf } func setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffer *byte, propertyBufferSize uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiSetDeviceRegistryPropertyW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiSetDeviceRegistryPropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize)) if r1 == 0 { err = errnoErr(e1) } @@ -4098,7 +4098,7 @@ func setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *Dev } func 
SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiSetSelectedDevice.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -4106,7 +4106,7 @@ func SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) + r1, _, e1 := syscall.SyscallN(procSetupDiSetSelectedDriverW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -4114,7 +4114,7 @@ func SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procSetupUninstallOEMInfW.Addr(), 3, uintptr(unsafe.Pointer(infFileName)), uintptr(flags), uintptr(reserved)) + r1, _, e1 := syscall.SyscallN(procSetupUninstallOEMInfW.Addr(), uintptr(unsafe.Pointer(infFileName)), uintptr(flags), uintptr(reserved)) if r1 == 0 { err = errnoErr(e1) } @@ -4122,7 +4122,7 @@ func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (er } func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) { - r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0) + r0, _, e1 := syscall.SyscallN(procCommandLineToArgvW.Addr(), uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc))) argv = (**uint16)(unsafe.Pointer(r0)) if argv == nil { err = errnoErr(e1) @@ -4131,7 +4131,7 @@ func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) { } func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) { - r0, _, _ := syscall.Syscall6(procSHGetKnownFolderPath.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)), 0, 0) + r0, _, _ := syscall.SyscallN(procSHGetKnownFolderPath.Addr(), uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -4139,7 +4139,7 @@ func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **u } func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) { - r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd)) + r1, _, e1 := syscall.SyscallN(procShellExecuteW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd)) if r1 <= 32 { err = errnoErr(e1) } @@ -4147,12 +4147,12 @@ func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *ui } func EnumChildWindows(hwnd HWND, enumFunc uintptr, param unsafe.Pointer) { - syscall.Syscall(procEnumChildWindows.Addr(), 3, uintptr(hwnd), uintptr(enumFunc), 
uintptr(param)) + syscall.SyscallN(procEnumChildWindows.Addr(), uintptr(hwnd), uintptr(enumFunc), uintptr(param)) return } func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) { - r1, _, e1 := syscall.Syscall(procEnumWindows.Addr(), 2, uintptr(enumFunc), uintptr(param), 0) + r1, _, e1 := syscall.SyscallN(procEnumWindows.Addr(), uintptr(enumFunc), uintptr(param)) if r1 == 0 { err = errnoErr(e1) } @@ -4160,7 +4160,7 @@ func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) { } func ExitWindowsEx(flags uint32, reason uint32) (err error) { - r1, _, e1 := syscall.Syscall(procExitWindowsEx.Addr(), 2, uintptr(flags), uintptr(reason), 0) + r1, _, e1 := syscall.SyscallN(procExitWindowsEx.Addr(), uintptr(flags), uintptr(reason)) if r1 == 0 { err = errnoErr(e1) } @@ -4168,7 +4168,7 @@ func ExitWindowsEx(flags uint32, reason uint32) (err error) { } func GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, err error) { - r0, _, e1 := syscall.Syscall(procGetClassNameW.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount)) + r0, _, e1 := syscall.SyscallN(procGetClassNameW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount)) copied = int32(r0) if copied == 0 { err = errnoErr(e1) @@ -4177,19 +4177,19 @@ func GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, e } func GetDesktopWindow() (hwnd HWND) { - r0, _, _ := syscall.Syscall(procGetDesktopWindow.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetDesktopWindow.Addr()) hwnd = HWND(r0) return } func GetForegroundWindow() (hwnd HWND) { - r0, _, _ := syscall.Syscall(procGetForegroundWindow.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetForegroundWindow.Addr()) hwnd = HWND(r0) return } func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) { - r1, _, e1 := syscall.Syscall(procGetGUIThreadInfo.Addr(), 2, uintptr(thread), uintptr(unsafe.Pointer(info)), 0) + r1, _, e1 := syscall.SyscallN(procGetGUIThreadInfo.Addr(), uintptr(thread), uintptr(unsafe.Pointer(info))) if r1 == 0 { err = errnoErr(e1) } @@ -4197,19 +4197,19 @@ func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) { } func GetKeyboardLayout(tid uint32) (hkl Handle) { - r0, _, _ := syscall.Syscall(procGetKeyboardLayout.Addr(), 1, uintptr(tid), 0, 0) + r0, _, _ := syscall.SyscallN(procGetKeyboardLayout.Addr(), uintptr(tid)) hkl = Handle(r0) return } func GetShellWindow() (shellWindow HWND) { - r0, _, _ := syscall.Syscall(procGetShellWindow.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetShellWindow.Addr()) shellWindow = HWND(r0) return } func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetWindowThreadProcessId.Addr(), 2, uintptr(hwnd), uintptr(unsafe.Pointer(pid)), 0) + r0, _, e1 := syscall.SyscallN(procGetWindowThreadProcessId.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(pid))) tid = uint32(r0) if tid == 0 { err = errnoErr(e1) @@ -4218,25 +4218,25 @@ func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) { } func IsWindow(hwnd HWND) (isWindow bool) { - r0, _, _ := syscall.Syscall(procIsWindow.Addr(), 1, uintptr(hwnd), 0, 0) + r0, _, _ := syscall.SyscallN(procIsWindow.Addr(), uintptr(hwnd)) isWindow = r0 != 0 return } func IsWindowUnicode(hwnd HWND) (isUnicode bool) { - r0, _, _ := syscall.Syscall(procIsWindowUnicode.Addr(), 1, uintptr(hwnd), 0, 0) + r0, _, _ := syscall.SyscallN(procIsWindowUnicode.Addr(), uintptr(hwnd)) isUnicode = r0 
!= 0 return } func IsWindowVisible(hwnd HWND) (isVisible bool) { - r0, _, _ := syscall.Syscall(procIsWindowVisible.Addr(), 1, uintptr(hwnd), 0, 0) + r0, _, _ := syscall.SyscallN(procIsWindowVisible.Addr(), uintptr(hwnd)) isVisible = r0 != 0 return } func LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadKeyboardLayoutW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(flags), 0) + r0, _, e1 := syscall.SyscallN(procLoadKeyboardLayoutW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags)) hkl = Handle(r0) if hkl == 0 { err = errnoErr(e1) @@ -4245,7 +4245,7 @@ func LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) { } func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { - r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) + r0, _, e1 := syscall.SyscallN(procMessageBoxW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype)) ret = int32(r0) if ret == 0 { err = errnoErr(e1) @@ -4254,13 +4254,13 @@ func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret i } func ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff *uint16, cchBuff int32, flags uint32, hkl Handle) (ret int32) { - r0, _, _ := syscall.Syscall9(procToUnicodeEx.Addr(), 7, uintptr(vkey), uintptr(scancode), uintptr(unsafe.Pointer(keystate)), uintptr(unsafe.Pointer(pwszBuff)), uintptr(cchBuff), uintptr(flags), uintptr(hkl), 0, 0) + r0, _, _ := syscall.SyscallN(procToUnicodeEx.Addr(), uintptr(vkey), uintptr(scancode), uintptr(unsafe.Pointer(keystate)), uintptr(unsafe.Pointer(pwszBuff)), uintptr(cchBuff), uintptr(flags), uintptr(hkl)) ret = int32(r0) return } func UnloadKeyboardLayout(hkl Handle) (err error) { - r1, _, e1 := syscall.Syscall(procUnloadKeyboardLayout.Addr(), 1, uintptr(hkl), 0, 0) + r1, _, e1 := syscall.SyscallN(procUnloadKeyboardLayout.Addr(), uintptr(hkl)) if r1 == 0 { err = errnoErr(e1) } @@ -4272,7 +4272,7 @@ func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) ( if inheritExisting { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procCreateEnvironmentBlock.Addr(), 3, uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0)) + r1, _, e1 := syscall.SyscallN(procCreateEnvironmentBlock.Addr(), uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -4280,7 +4280,7 @@ func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) ( } func DestroyEnvironmentBlock(block *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDestroyEnvironmentBlock.Addr(), 1, uintptr(unsafe.Pointer(block)), 0, 0) + r1, _, e1 := syscall.SyscallN(procDestroyEnvironmentBlock.Addr(), uintptr(unsafe.Pointer(block))) if r1 == 0 { err = errnoErr(e1) } @@ -4288,7 +4288,7 @@ func DestroyEnvironmentBlock(block *uint16) (err error) { } func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetUserProfileDirectoryW.Addr(), 3, uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen))) + r1, _, e1 := syscall.SyscallN(procGetUserProfileDirectoryW.Addr(), uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen))) if r1 == 0 { err = errnoErr(e1) } @@ -4305,7 +4305,7 @@ func GetFileVersionInfoSize(filename string, zeroHandle *Handle) (bufSize uint32 } func 
_GetFileVersionInfoSize(filename *uint16, zeroHandle *Handle) (bufSize uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileVersionInfoSizeW.Addr(), 2, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(zeroHandle)), 0) + r0, _, e1 := syscall.SyscallN(procGetFileVersionInfoSizeW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(zeroHandle))) bufSize = uint32(r0) if bufSize == 0 { err = errnoErr(e1) @@ -4323,7 +4323,7 @@ func GetFileVersionInfo(filename string, handle uint32, bufSize uint32, buffer u } func _GetFileVersionInfo(filename *uint16, handle uint32, bufSize uint32, buffer unsafe.Pointer) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileVersionInfoW.Addr(), 4, uintptr(unsafe.Pointer(filename)), uintptr(handle), uintptr(bufSize), uintptr(buffer), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetFileVersionInfoW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(handle), uintptr(bufSize), uintptr(buffer)) if r1 == 0 { err = errnoErr(e1) } @@ -4340,7 +4340,7 @@ func VerQueryValue(block unsafe.Pointer, subBlock string, pointerToBufferPointer } func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPointer unsafe.Pointer, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procVerQueryValueW.Addr(), 4, uintptr(block), uintptr(unsafe.Pointer(subBlock)), uintptr(pointerToBufferPointer), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procVerQueryValueW.Addr(), uintptr(block), uintptr(unsafe.Pointer(subBlock)), uintptr(pointerToBufferPointer), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -4348,7 +4348,7 @@ func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPoint } func TimeBeginPeriod(period uint32) (err error) { - r1, _, e1 := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0) + r1, _, e1 := syscall.SyscallN(proctimeBeginPeriod.Addr(), uintptr(period)) if r1 != 0 { err = errnoErr(e1) } @@ -4356,7 +4356,7 @@ func TimeBeginPeriod(period uint32) (err error) { } func TimeEndPeriod(period uint32) (err error) { - r1, _, e1 := syscall.Syscall(proctimeEndPeriod.Addr(), 1, uintptr(period), 0, 0) + r1, _, e1 := syscall.SyscallN(proctimeEndPeriod.Addr(), uintptr(period)) if r1 != 0 { err = errnoErr(e1) } @@ -4364,7 +4364,7 @@ func TimeEndPeriod(period uint32) (err error) { } func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) { - r0, _, _ := syscall.Syscall(procWinVerifyTrustEx.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data))) + r0, _, _ := syscall.SyscallN(procWinVerifyTrustEx.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -4372,12 +4372,12 @@ func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) } func FreeAddrInfoW(addrinfo *AddrinfoW) { - syscall.Syscall(procFreeAddrInfoW.Addr(), 1, uintptr(unsafe.Pointer(addrinfo)), 0, 0) + syscall.SyscallN(procFreeAddrInfoW.Addr(), uintptr(unsafe.Pointer(addrinfo))) return } func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) { - r0, _, _ := syscall.Syscall6(procGetAddrInfoW.Addr(), 4, uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetAddrInfoW.Addr(), uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), 
uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result))) if r0 != 0 { sockerr = syscall.Errno(r0) } @@ -4385,7 +4385,7 @@ func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, resul } func WSACleanup() (err error) { - r1, _, e1 := syscall.Syscall(procWSACleanup.Addr(), 0, 0, 0, 0) + r1, _, e1 := syscall.SyscallN(procWSACleanup.Addr()) if r1 == socket_error { err = errnoErr(e1) } @@ -4393,7 +4393,7 @@ func WSACleanup() (err error) { } func WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err error) { - r1, _, e1 := syscall.Syscall(procWSADuplicateSocketW.Addr(), 3, uintptr(s), uintptr(processID), uintptr(unsafe.Pointer(info))) + r1, _, e1 := syscall.SyscallN(procWSADuplicateSocketW.Addr(), uintptr(s), uintptr(processID), uintptr(unsafe.Pointer(info))) if r1 != 0 { err = errnoErr(e1) } @@ -4401,7 +4401,7 @@ func WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err } func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) { - r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength))) + r0, _, e1 := syscall.SyscallN(procWSAEnumProtocolsW.Addr(), uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength))) n = int32(r0) if n == -1 { err = errnoErr(e1) @@ -4414,7 +4414,7 @@ func WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, f if wait { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) + r1, _, e1 := syscall.SyscallN(procWSAGetOverlappedResult.Addr(), uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags))) if r1 == 0 { err = errnoErr(e1) } @@ -4422,7 +4422,7 @@ func WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, f } func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { - r1, _, e1 := syscall.Syscall9(procWSAIoctl.Addr(), 9, uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) + r1, _, e1 := syscall.SyscallN(procWSAIoctl.Addr(), uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) if r1 == socket_error { err = errnoErr(e1) } @@ -4430,7 +4430,7 @@ func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbo } func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) (err error) { - r1, _, e1 := syscall.Syscall(procWSALookupServiceBeginW.Addr(), 3, uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle))) + r1, _, e1 := syscall.SyscallN(procWSALookupServiceBeginW.Addr(), uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle))) if r1 == socket_error { err = errnoErr(e1) } @@ -4438,7 +4438,7 @@ func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) } func WSALookupServiceEnd(handle 
Handle) (err error) { - r1, _, e1 := syscall.Syscall(procWSALookupServiceEnd.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procWSALookupServiceEnd.Addr(), uintptr(handle)) if r1 == socket_error { err = errnoErr(e1) } @@ -4446,7 +4446,7 @@ func WSALookupServiceEnd(handle Handle) (err error) { } func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WSAQUERYSET) (err error) { - r1, _, e1 := syscall.Syscall6(procWSALookupServiceNextW.Addr(), 4, uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet)), 0, 0) + r1, _, e1 := syscall.SyscallN(procWSALookupServiceNextW.Addr(), uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet))) if r1 == socket_error { err = errnoErr(e1) } @@ -4454,7 +4454,7 @@ func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WS } func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + r1, _, e1 := syscall.SyscallN(procWSARecv.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { err = errnoErr(e1) } @@ -4462,7 +4462,7 @@ func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32 } func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSARecvFrom.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + r1, _, e1 := syscall.SyscallN(procWSARecvFrom.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { err = errnoErr(e1) } @@ -4470,7 +4470,7 @@ func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *ui } func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSASend.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + r1, _, e1 := syscall.SyscallN(procWSASend.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { err = errnoErr(e1) } @@ -4478,7 +4478,7 @@ func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, } func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped 
*Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSASendTo.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + r1, _, e1 := syscall.SyscallN(procWSASendTo.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { err = errnoErr(e1) } @@ -4486,7 +4486,7 @@ func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32 } func WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procWSASocketW.Addr(), 6, uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protoInfo)), uintptr(group), uintptr(flags)) + r0, _, e1 := syscall.SyscallN(procWSASocketW.Addr(), uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protoInfo)), uintptr(group), uintptr(flags)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -4495,7 +4495,7 @@ func WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, } func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { - r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0) + r0, _, _ := syscall.SyscallN(procWSAStartup.Addr(), uintptr(verreq), uintptr(unsafe.Pointer(data))) if r0 != 0 { sockerr = syscall.Errno(r0) } @@ -4503,7 +4503,7 @@ func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { } func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + r1, _, e1 := syscall.SyscallN(procbind.Addr(), uintptr(s), uintptr(name), uintptr(namelen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4511,7 +4511,7 @@ func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) { } func Closesocket(s Handle) (err error) { - r1, _, e1 := syscall.Syscall(procclosesocket.Addr(), 1, uintptr(s), 0, 0) + r1, _, e1 := syscall.SyscallN(procclosesocket.Addr(), uintptr(s)) if r1 == socket_error { err = errnoErr(e1) } @@ -4519,7 +4519,7 @@ func Closesocket(s Handle) (err error) { } func connect(s Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procconnect.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + r1, _, e1 := syscall.SyscallN(procconnect.Addr(), uintptr(s), uintptr(name), uintptr(namelen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4536,7 +4536,7 @@ func GetHostByName(name string) (h *Hostent, err error) { } func _GetHostByName(name *byte) (h *Hostent, err error) { - r0, _, e1 := syscall.Syscall(procgethostbyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + r0, _, e1 := syscall.SyscallN(procgethostbyname.Addr(), uintptr(unsafe.Pointer(name))) h = (*Hostent)(unsafe.Pointer(r0)) if h == nil { err = errnoErr(e1) @@ -4545,7 +4545,7 @@ func _GetHostByName(name *byte) (h *Hostent, err error) { } func getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r1, _, e1 := 
syscall.SyscallN(procgetpeername.Addr(), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if r1 == socket_error { err = errnoErr(e1) } @@ -4562,7 +4562,7 @@ func GetProtoByName(name string) (p *Protoent, err error) { } func _GetProtoByName(name *byte) (p *Protoent, err error) { - r0, _, e1 := syscall.Syscall(procgetprotobyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + r0, _, e1 := syscall.SyscallN(procgetprotobyname.Addr(), uintptr(unsafe.Pointer(name))) p = (*Protoent)(unsafe.Pointer(r0)) if p == nil { err = errnoErr(e1) @@ -4585,7 +4585,7 @@ func GetServByName(name string, proto string) (s *Servent, err error) { } func _GetServByName(name *byte, proto *byte) (s *Servent, err error) { - r0, _, e1 := syscall.Syscall(procgetservbyname.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)), 0) + r0, _, e1 := syscall.SyscallN(procgetservbyname.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto))) s = (*Servent)(unsafe.Pointer(r0)) if s == nil { err = errnoErr(e1) @@ -4594,7 +4594,7 @@ func _GetServByName(name *byte, proto *byte) (s *Servent, err error) { } func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r1, _, e1 := syscall.SyscallN(procgetsockname.Addr(), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if r1 == socket_error { err = errnoErr(e1) } @@ -4602,7 +4602,7 @@ func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { } func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) { - r1, _, e1 := syscall.Syscall6(procgetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)), 0) + r1, _, e1 := syscall.SyscallN(procgetsockopt.Addr(), uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen))) if r1 == socket_error { err = errnoErr(e1) } @@ -4610,7 +4610,7 @@ func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int3 } func listen(s Handle, backlog int32) (err error) { - r1, _, e1 := syscall.Syscall(proclisten.Addr(), 2, uintptr(s), uintptr(backlog), 0) + r1, _, e1 := syscall.SyscallN(proclisten.Addr(), uintptr(s), uintptr(backlog)) if r1 == socket_error { err = errnoErr(e1) } @@ -4618,7 +4618,7 @@ func listen(s Handle, backlog int32) (err error) { } func Ntohs(netshort uint16) (u uint16) { - r0, _, _ := syscall.Syscall(procntohs.Addr(), 1, uintptr(netshort), 0, 0) + r0, _, _ := syscall.SyscallN(procntohs.Addr(), uintptr(netshort)) u = uint16(r0) return } @@ -4628,7 +4628,7 @@ func recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen * if len(buf) > 0 { _p0 = &buf[0] } - r0, _, e1 := syscall.Syscall6(procrecvfrom.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall.SyscallN(procrecvfrom.Addr(), uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int32(r0) if n == -1 { err = errnoErr(e1) @@ -4641,7 +4641,7 @@ func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) ( if len(buf) > 0 { _p0 = &buf[0] } - r1, _, e1 := syscall.Syscall6(procsendto.Addr(), 6, uintptr(s), 
uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen)) + r1, _, e1 := syscall.SyscallN(procsendto.Addr(), uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4649,7 +4649,7 @@ func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) ( } func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) { - r1, _, e1 := syscall.Syscall6(procsetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen), 0) + r1, _, e1 := syscall.SyscallN(procsetsockopt.Addr(), uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4657,7 +4657,7 @@ func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32 } func shutdown(s Handle, how int32) (err error) { - r1, _, e1 := syscall.Syscall(procshutdown.Addr(), 2, uintptr(s), uintptr(how), 0) + r1, _, e1 := syscall.SyscallN(procshutdown.Addr(), uintptr(s), uintptr(how)) if r1 == socket_error { err = errnoErr(e1) } @@ -4665,7 +4665,7 @@ func shutdown(s Handle, how int32) (err error) { } func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procsocket.Addr(), 3, uintptr(af), uintptr(typ), uintptr(protocol)) + r0, _, e1 := syscall.SyscallN(procsocket.Addr(), uintptr(af), uintptr(typ), uintptr(protocol)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -4674,7 +4674,7 @@ func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { } func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procWTSEnumerateSessionsW.Addr(), 5, uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)), 0) + r1, _, e1 := syscall.SyscallN(procWTSEnumerateSessionsW.Addr(), uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count))) if r1 == 0 { err = errnoErr(e1) } @@ -4682,12 +4682,12 @@ func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessio } func WTSFreeMemory(ptr uintptr) { - syscall.Syscall(procWTSFreeMemory.Addr(), 1, uintptr(ptr), 0, 0) + syscall.SyscallN(procWTSFreeMemory.Addr(), uintptr(ptr)) return } func WTSQueryUserToken(session uint32, token *Token) (err error) { - r1, _, e1 := syscall.Syscall(procWTSQueryUserToken.Addr(), 2, uintptr(session), uintptr(unsafe.Pointer(token)), 0) + r1, _, e1 := syscall.SyscallN(procWTSQueryUserToken.Addr(), uintptr(session), uintptr(unsafe.Pointer(token))) if r1 == 0 { err = errnoErr(e1) } diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/jsonschema.go b/vendor/helm.sh/helm/v3/pkg/chartutil/jsonschema.go index 77e90b10a6865..d712316c59cc5 100644 --- a/vendor/helm.sh/helm/v3/pkg/chartutil/jsonschema.go +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/jsonschema.go @@ -18,15 +18,57 @@ package chartutil import ( "bytes" + "crypto/tls" "errors" "fmt" "strings" + "time" "github.com/santhosh-tekuri/jsonschema/v6" + "net/http" + + "helm.sh/helm/v3/internal/version" "helm.sh/helm/v3/pkg/chart" ) +// HTTPURLLoader implements a loader for HTTP/HTTPS URLs +type HTTPURLLoader http.Client + +func (l *HTTPURLLoader) Load(urlStr string) (any, error) { + client := 
(*http.Client)(l) + + req, err := http.NewRequest(http.MethodGet, urlStr, nil) + if err != nil { + return nil, fmt.Errorf("failed to create HTTP request for %s: %w", urlStr, err) + } + req.Header.Set("User-Agent", version.GetUserAgent()) + + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("HTTP request failed for %s: %w", urlStr, err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("HTTP request to %s returned status %d (%s)", urlStr, resp.StatusCode, http.StatusText(resp.StatusCode)) + } + + return jsonschema.UnmarshalJSON(resp.Body) +} + +// newHTTPURLLoader creates a HTTP URL loader with proxy support. +func newHTTPURLLoader() *HTTPURLLoader { + httpLoader := HTTPURLLoader(http.Client{ + Timeout: 15 * time.Second, + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + TLSClientConfig: &tls.Config{}, + }, + }) + return &httpLoader +} + // ValidateAgainstSchema checks that values does not violate the structure laid out in schema func ValidateAgainstSchema(chrt *chart.Chart, values map[string]interface{}) error { var sb strings.Builder @@ -68,7 +110,15 @@ func ValidateAgainstSingleSchema(values Values, schemaJSON []byte) (reterr error return err } + // Configure compiler with loaders for different URL schemes + loader := jsonschema.SchemeURLLoader{ + "file": jsonschema.FileLoader{}, + "http": newHTTPURLLoader(), + "https": newHTTPURLLoader(), + } + compiler := jsonschema.NewCompiler() + compiler.UseLoader(loader) err = compiler.AddResource("file:///values.schema.json", schema) if err != nil { return err diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/formats.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/formats.go index c11f1c627a741..5c0397909b656 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/formats.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/formats.go @@ -113,9 +113,11 @@ func GetUnrecognizedFormats(schema *spec.Schema, compatibilityVersion *version.V return unrecognizedFormats } - normalized := strings.ReplaceAll(schema.Format, "-", "") // go-openapi default format name normalization - if !supportedFormatsAtVersion(compatibilityVersion).supported.Has(normalized) { - unrecognizedFormats = append(unrecognizedFormats, schema.Format) + if len(schema.Type) == 1 && schema.Type[0] == "string" { + normalized := strings.ReplaceAll(schema.Format, "-", "") // go-openapi default format name normalization + if !supportedFormatsAtVersion(compatibilityVersion).supported.Has(normalized) { + unrecognizedFormats = append(unrecognizedFormats, schema.Format) + } } return unrecognizedFormats diff --git a/vendor/k8s.io/metrics/LICENSE b/vendor/k8s.io/metrics/LICENSE new file mode 100644 index 0000000000000..8dada3edaf50d --- /dev/null +++ b/vendor/k8s.io/metrics/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
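The zsyscall_windows.go hunks earlier in this diff mechanically replace the fixed-arity syscall.Syscall, syscall.Syscall6 and syscall.Syscall9 helpers with the variadic syscall.SyscallN, which takes only the procedure address plus the actual arguments, so the explicit argument count and the trailing zero padding drop out. A minimal standalone sketch of the before/after shape, reusing GetDesktopWindow from the hunks above (Windows-only; the old form is kept here purely for comparison, this is not code from the vendored file):

//go:build windows

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// Resolve a user32 export the same way the generated wrappers do.
	user32 := syscall.NewLazyDLL("user32.dll")
	procGetDesktopWindow := user32.NewProc("GetDesktopWindow")

	// Before: fixed-arity helper with an explicit argument count and zero padding.
	oldHwnd, _, _ := syscall.Syscall(procGetDesktopWindow.Addr(), 0, 0, 0, 0)

	// After: variadic helper; pass exactly the arguments the Win32 API takes.
	newHwnd, _, _ := syscall.SyscallN(procGetDesktopWindow.Addr())

	fmt.Printf("desktop HWND: %#x (both forms agree: %v)\n", newHwnd, oldHwnd == newHwnd)
}

Similarly, the helm chartutil/jsonschema.go hunk above wires file, http and https loaders into the JSON schema compiler (15-second timeout, proxy from the environment, Helm's User-Agent), which is evidently intended to let a chart's values.schema.json resolve remote $ref targets. A hedged usage sketch under those assumptions; the schema content and the https URL are placeholders, and the call assumes the helm.sh/helm/v3 module with this patch applied:

package main

import (
	"fmt"

	"helm.sh/helm/v3/pkg/chartutil"
)

func main() {
	// Hypothetical values.schema.json that pulls a sub-schema over HTTPS.
	// The compiler configured in this diff registers file, http and https
	// loaders, so a ref like this can be fetched when the schema is compiled.
	schema := []byte(`{
		"$schema": "https://json-schema.org/draft/2020-12/schema",
		"type": "object",
		"properties": {
			"image": {"$ref": "https://example.com/schemas/image.schema.json"}
		}
	}`)

	values := chartutil.Values{
		"image": map[string]interface{}{"repository": "nginx", "tag": "1.27"},
	}

	// Validation errors (including a failed remote fetch) surface as a single error.
	if err := chartutil.ValidateAgainstSingleSchema(values, schema); err != nil {
		fmt.Println("schema validation failed:", err)
	}
}
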
diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/doc.go b/vendor/k8s.io/metrics/pkg/apis/metrics/doc.go new file mode 100644 index 0000000000000..de79b6d058364 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +groupName=metrics.k8s.io + +// Package metrics defines an API for exposing metrics. +package metrics diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/register.go b/vendor/k8s.io/metrics/pkg/apis/metrics/register.go new file mode 100644 index 0000000000000..9384e44b91212 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "metrics.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &NodeMetrics{}, + &NodeMetricsList{}, + &PodMetrics{}, + &PodMetricsList{}, + ) + return nil +} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/types.go b/vendor/k8s.io/metrics/pkg/apis/metrics/types.go new file mode 100644 index 0000000000000..f1c58c76840d8 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/types.go @@ -0,0 +1,101 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +resourceName=nodes +// +genclient:readonly +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeMetrics sets resource usage metrics of a node. +type NodeMetrics struct { + metav1.TypeMeta + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta + + // The following fields define time interval from which metrics were + // collected from the interval [Timestamp-Window, Timestamp]. + Timestamp metav1.Time + Window metav1.Duration + + // The memory usage is the memory working set. + Usage corev1.ResourceList +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeMetricsList is a list of NodeMetrics. +type NodeMetricsList struct { + metav1.TypeMeta + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + metav1.ListMeta + + // List of node metrics. + Items []NodeMetrics +} + +// +genclient +// +resourceName=pods +// +genclient:readonly +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodMetrics sets resource usage metrics of a pod. +type PodMetrics struct { + metav1.TypeMeta + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta + + // The following fields define time interval from which metrics were + // collected from the interval [Timestamp-Window, Timestamp]. + Timestamp metav1.Time + Window metav1.Duration + + // Metrics for all containers are collected within the same time window. + Containers []ContainerMetrics +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodMetricsList is a list of PodMetrics. +type PodMetricsList struct { + metav1.TypeMeta + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + metav1.ListMeta + + // List of pod metrics. + Items []PodMetrics +} + +// ContainerMetrics sets resource usage metrics of a container. +type ContainerMetrics struct { + // Container name corresponding to the one from pod.spec.containers. + Name string + // The memory usage is the memory working set. + Usage corev1.ResourceList +} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/doc.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/doc.go new file mode 100644 index 0000000000000..760a9afbadfa6 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:conversion-gen=k8s.io/metrics/pkg/apis/metrics +// +k8s:openapi-gen=true +// +groupName=metrics.k8s.io + +// Package v1beta1 is the v1beta1 version of the metrics API. +package v1beta1 diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.pb.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.pb.go new file mode 100644 index 0000000000000..ae62aa8370072 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.pb.go @@ -0,0 +1,1757 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + + k8s_io_api_core_v1 "k8s.io/api/core/v1" + k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" + resource "k8s.io/apimachinery/pkg/api/resource" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ContainerMetrics) Reset() { *m = ContainerMetrics{} } +func (*ContainerMetrics) ProtoMessage() {} +func (*ContainerMetrics) Descriptor() ([]byte, []int) { + return fileDescriptor_0eb2073129c5331c, []int{0} +} +func (m *ContainerMetrics) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerMetrics) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerMetrics.Merge(m, src) +} +func (m *ContainerMetrics) XXX_Size() int { + return m.Size() +} +func (m *ContainerMetrics) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerMetrics.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerMetrics proto.InternalMessageInfo + +func (m *NodeMetrics) Reset() { *m = NodeMetrics{} } +func (*NodeMetrics) ProtoMessage() {} +func (*NodeMetrics) Descriptor() ([]byte, []int) { + return fileDescriptor_0eb2073129c5331c, []int{1} +} +func (m *NodeMetrics) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeMetrics) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeMetrics.Merge(m, src) +} +func (m *NodeMetrics) XXX_Size() int { + return m.Size() +} +func (m *NodeMetrics) XXX_DiscardUnknown() { + xxx_messageInfo_NodeMetrics.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeMetrics proto.InternalMessageInfo + +func (m *NodeMetricsList) Reset() { *m = NodeMetricsList{} } +func (*NodeMetricsList) ProtoMessage() {} +func (*NodeMetricsList) Descriptor() ([]byte, []int) { + return fileDescriptor_0eb2073129c5331c, []int{2} +} +func (m *NodeMetricsList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeMetricsList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeMetricsList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeMetricsList.Merge(m, src) +} +func (m *NodeMetricsList) XXX_Size() int { + return m.Size() +} +func (m *NodeMetricsList) XXX_DiscardUnknown() { + xxx_messageInfo_NodeMetricsList.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeMetricsList proto.InternalMessageInfo + +func (m *PodMetrics) Reset() { *m = PodMetrics{} } +func (*PodMetrics) ProtoMessage() {} +func (*PodMetrics) Descriptor() ([]byte, []int) { + return fileDescriptor_0eb2073129c5331c, []int{3} +} +func (m *PodMetrics) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodMetrics) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodMetrics.Merge(m, src) +} +func (m *PodMetrics) XXX_Size() int { + return m.Size() +} +func (m *PodMetrics) XXX_DiscardUnknown() { + xxx_messageInfo_PodMetrics.DiscardUnknown(m) +} + +var xxx_messageInfo_PodMetrics proto.InternalMessageInfo + +func (m *PodMetricsList) Reset() { *m = PodMetricsList{} } +func (*PodMetricsList) ProtoMessage() {} +func (*PodMetricsList) Descriptor() ([]byte, []int) { + 
return fileDescriptor_0eb2073129c5331c, []int{4} +} +func (m *PodMetricsList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodMetricsList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodMetricsList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodMetricsList.Merge(m, src) +} +func (m *PodMetricsList) XXX_Size() int { + return m.Size() +} +func (m *PodMetricsList) XXX_DiscardUnknown() { + xxx_messageInfo_PodMetricsList.DiscardUnknown(m) +} + +var xxx_messageInfo_PodMetricsList proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ContainerMetrics)(nil), "k8s.io.metrics.pkg.apis.metrics.v1beta1.ContainerMetrics") + proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "k8s.io.metrics.pkg.apis.metrics.v1beta1.ContainerMetrics.UsageEntry") + proto.RegisterType((*NodeMetrics)(nil), "k8s.io.metrics.pkg.apis.metrics.v1beta1.NodeMetrics") + proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "k8s.io.metrics.pkg.apis.metrics.v1beta1.NodeMetrics.UsageEntry") + proto.RegisterType((*NodeMetricsList)(nil), "k8s.io.metrics.pkg.apis.metrics.v1beta1.NodeMetricsList") + proto.RegisterType((*PodMetrics)(nil), "k8s.io.metrics.pkg.apis.metrics.v1beta1.PodMetrics") + proto.RegisterType((*PodMetricsList)(nil), "k8s.io.metrics.pkg.apis.metrics.v1beta1.PodMetricsList") +} + +func init() { + proto.RegisterFile("k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.proto", fileDescriptor_0eb2073129c5331c) +} + +var fileDescriptor_0eb2073129c5331c = []byte{ + // 646 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0xbf, 0x6f, 0xd3, 0x4e, + 0x1c, 0xcd, 0x35, 0x49, 0xd5, 0x5e, 0xbe, 0xdf, 0x52, 0x3c, 0x55, 0x19, 0x9c, 0xca, 0x4b, 0x2b, + 0xa4, 0x9e, 0x69, 0xa8, 0xa0, 0xb0, 0x20, 0x99, 0x30, 0x20, 0x91, 0x02, 0x56, 0xf9, 0xcd, 0xc0, + 0xc5, 0x39, 0x9c, 0x23, 0xd8, 0x67, 0xd9, 0xe7, 0x54, 0xd9, 0x50, 0xc5, 0xc4, 0x84, 0xf8, 0xab, + 0x22, 0xa6, 0x8e, 0x1d, 0x50, 0x4a, 0xcc, 0xcc, 0x3f, 0xc0, 0x84, 0x7c, 0x3e, 0xc7, 0xa1, 0x09, + 0xa9, 0xe9, 0xc0, 0xd4, 0xcd, 0xfe, 0xf8, 0xde, 0x7b, 0x9f, 0x7b, 0x9f, 0x77, 0x27, 0xc3, 0x1b, + 0xdd, 0xdd, 0x00, 0x51, 0xa6, 0x3b, 0x84, 0xfb, 0xd4, 0x0a, 0x74, 0xaf, 0x6b, 0xeb, 0xd8, 0xa3, + 0xc1, 0xb8, 0xd0, 0xdb, 0x6e, 0x11, 0x8e, 0xb7, 0x75, 0x9b, 0xb8, 0xc4, 0xc7, 0x9c, 0xb4, 0x91, + 0xe7, 0x33, 0xce, 0x94, 0x8d, 0x04, 0x88, 0xe4, 0x3a, 0xe4, 0x75, 0x6d, 0x14, 0x03, 0xc7, 0x05, + 0x09, 0xac, 0x6e, 0xd9, 0x94, 0x77, 0xc2, 0x16, 0xb2, 0x98, 0xa3, 0xdb, 0xcc, 0x66, 0xba, 0xc0, + 0xb7, 0xc2, 0x37, 0xe2, 0x4d, 0xbc, 0x88, 0xa7, 0x84, 0xb7, 0xaa, 0xc9, 0x86, 0xb0, 0x47, 0x75, + 0x8b, 0xf9, 0x44, 0xef, 0x4d, 0x69, 0x57, 0x77, 0xb2, 0x35, 0x0e, 0xb6, 0x3a, 0xd4, 0x25, 0x7e, + 0x3f, 0xed, 0x5c, 0xf7, 0x49, 0xc0, 0x42, 0xdf, 0x22, 0x7f, 0x85, 0x12, 0xfb, 0xc5, 0xb3, 0xb4, + 0xf4, 0x3f, 0xa1, 0xfc, 0xd0, 0xe5, 0xd4, 0x99, 0x96, 0xb9, 0x7e, 0x16, 0x20, 0xb0, 0x3a, 0xc4, + 0xc1, 0xa7, 0x71, 0xda, 0x61, 0x11, 0xae, 0xde, 0x61, 0x2e, 0xc7, 0x31, 0xa2, 0x99, 0x98, 0xa8, + 0xac, 0xc3, 0x92, 0x8b, 0x1d, 0xb2, 0x06, 0xd6, 0xc1, 0xe6, 0xb2, 0xf1, 0xdf, 0x60, 0x58, 0x2b, + 0x44, 0xc3, 0x5a, 0x69, 0x0f, 0x3b, 0xc4, 0x14, 0x5f, 0x94, 0x11, 0x80, 0xe5, 0x30, 0xc0, 0x36, + 0x59, 0x5b, 0x58, 0x2f, 0x6e, 0x56, 0xea, 0x0d, 0x94, 0x73, 0x30, 0xe8, 0xb4, 0x18, 0x7a, 0x1c, + 0xd3, 0xdc, 0x75, 0xb9, 0xdf, 0x37, 0x3e, 0x00, 0x29, 0x55, 0x16, 0xc5, 0x9f, 0xc3, 
0x5a, 0x6d, + 0x7a, 0x2e, 0xc8, 0x94, 0x56, 0xdf, 0xa7, 0x01, 0x3f, 0x3c, 0x99, 0xbb, 0x24, 0xee, 0xf8, 0xe3, + 0x49, 0x6d, 0x2b, 0xcf, 0xe4, 0xd0, 0xa3, 0x10, 0xbb, 0x9c, 0xf2, 0xbe, 0x99, 0xec, 0xac, 0xda, + 0x81, 0x30, 0xeb, 0x4d, 0x59, 0x85, 0xc5, 0x2e, 0xe9, 0x27, 0x96, 0x98, 0xf1, 0xa3, 0xd2, 0x80, + 0xe5, 0x1e, 0x7e, 0x17, 0xc6, 0x16, 0x80, 0xcd, 0x4a, 0x1d, 0xa5, 0x16, 0x4c, 0xaa, 0xa4, 0x3e, + 0xa0, 0x19, 0x2a, 0x02, 0x7c, 0x6b, 0x61, 0x17, 0x68, 0x3f, 0x4a, 0xb0, 0xb2, 0xc7, 0xda, 0x24, + 0xf5, 0xff, 0x35, 0x5c, 0x8a, 0x83, 0xd1, 0xc6, 0x1c, 0x0b, 0xc1, 0x4a, 0xfd, 0xea, 0x3c, 0x72, + 0x61, 0x32, 0x46, 0xbd, 0x6d, 0xf4, 0xa0, 0xf5, 0x96, 0x58, 0xbc, 0x49, 0x38, 0x36, 0x14, 0x69, + 0x25, 0xcc, 0x6a, 0xe6, 0x98, 0x55, 0x79, 0x09, 0x97, 0xe3, 0x54, 0x04, 0x1c, 0x3b, 0x9e, 0xec, + 0xff, 0x4a, 0x3e, 0x89, 0x7d, 0xea, 0x10, 0xe3, 0xb2, 0x24, 0x5f, 0xde, 0x4f, 0x49, 0xcc, 0x8c, + 0x4f, 0x79, 0x02, 0x17, 0x0f, 0xa8, 0xdb, 0x66, 0x07, 0x6b, 0xc5, 0xb3, 0x9d, 0xc9, 0x98, 0x1b, + 0xa1, 0x8f, 0x39, 0x65, 0xae, 0xb1, 0x22, 0xd9, 0x17, 0x9f, 0x0a, 0x16, 0x53, 0xb2, 0x29, 0x5f, + 0xc7, 0xa1, 0x2b, 0x89, 0xd0, 0xdd, 0xce, 0x1d, 0xba, 0x09, 0x73, 0x2f, 0xf2, 0x06, 0xb4, 0x2f, + 0x00, 0x5e, 0x9a, 0xb0, 0x24, 0xde, 0x98, 0xf2, 0x6a, 0x2a, 0x73, 0x39, 0xc7, 0x16, 0xa3, 0x45, + 0xe2, 0x56, 0xa5, 0x99, 0x4b, 0x69, 0x65, 0x22, 0x6f, 0xcf, 0x61, 0x99, 0x72, 0xe2, 0x04, 0xf2, + 0xba, 0xd8, 0x39, 0xcf, 0xe4, 0x8c, 0xff, 0xd3, 0x69, 0xdd, 0x8b, 0xa9, 0xcc, 0x84, 0x51, 0xfb, + 0x5c, 0x84, 0xf0, 0x21, 0x6b, 0x5f, 0x9c, 0x9d, 0xb9, 0x67, 0xc7, 0x81, 0xd0, 0x4a, 0x6f, 0xde, + 0x40, 0x9e, 0x9f, 0x9b, 0xe7, 0xbe, 0xb4, 0x33, 0x87, 0xc6, 0x5f, 0x02, 0x73, 0x42, 0x40, 0x1b, + 0x00, 0xb8, 0x92, 0x0d, 0xe5, 0x1f, 0x04, 0xec, 0xd9, 0xef, 0x01, 0xbb, 0x96, 0x7b, 0x6b, 0x59, + 0x97, 0xb3, 0xf3, 0x65, 0x34, 0x07, 0x23, 0xb5, 0x70, 0x34, 0x52, 0x0b, 0xc7, 0x23, 0xb5, 0xf0, + 0x3e, 0x52, 0xc1, 0x20, 0x52, 0xc1, 0x51, 0xa4, 0x82, 0xe3, 0x48, 0x05, 0xdf, 0x22, 0x15, 0x7c, + 0xfa, 0xae, 0x16, 0x5e, 0x6c, 0xe4, 0xfc, 0xa3, 0xf9, 0x15, 0x00, 0x00, 0xff, 0xff, 0x11, 0x63, + 0x3e, 0x0d, 0xfb, 0x08, 0x00, 0x00, +} + +func (m *ContainerMetrics) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerMetrics) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Usage) > 0 { + keysForUsage := make([]string, 0, len(m.Usage)) + for k := range m.Usage { + keysForUsage = append(keysForUsage, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsage) + for iNdEx := len(keysForUsage) - 1; iNdEx >= 0; iNdEx-- { + v := m.Usage[k8s_io_api_core_v1.ResourceName(keysForUsage[iNdEx])] + baseI := i + { + size, err := ((*resource.Quantity)(&v)).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForUsage[iNdEx]) + copy(dAtA[i:], keysForUsage[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUsage[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, 
nil +} + +func (m *NodeMetrics) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeMetrics) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Usage) > 0 { + keysForUsage := make([]string, 0, len(m.Usage)) + for k := range m.Usage { + keysForUsage = append(keysForUsage, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsage) + for iNdEx := len(keysForUsage) - 1; iNdEx >= 0; iNdEx-- { + v := m.Usage[k8s_io_api_core_v1.ResourceName(keysForUsage[iNdEx])] + baseI := i + { + size, err := ((*resource.Quantity)(&v)).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForUsage[iNdEx]) + copy(dAtA[i:], keysForUsage[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUsage[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + { + size, err := m.Window.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Timestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NodeMetricsList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeMetricsList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeMetricsList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodMetrics) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodMetrics) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Containers) > 0 { + for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Containers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + { + size, err := m.Window.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Timestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodMetricsList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodMetricsList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodMetricsList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ContainerMetrics) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Usage) > 0 { + for k, v := range m.Usage { + _ = k + _ = v + l = ((*resource.Quantity)(&v)).Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *NodeMetrics) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Timestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Window.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Usage) > 0 { + for k, v := range m.Usage { + _ = k + _ = v + l = ((*resource.Quantity)(&v)).Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *NodeMetricsList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodMetrics) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Timestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Window.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Containers) > 0 { + for _, e := range m.Containers { + l = 
e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodMetricsList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ContainerMetrics) String() string { + if this == nil { + return "nil" + } + keysForUsage := make([]string, 0, len(this.Usage)) + for k := range this.Usage { + keysForUsage = append(keysForUsage, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsage) + mapStringForUsage := "k8s_io_api_core_v1.ResourceList{" + for _, k := range keysForUsage { + mapStringForUsage += fmt.Sprintf("%v: %v,", k, this.Usage[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForUsage += "}" + s := strings.Join([]string{`&ContainerMetrics{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Usage:` + mapStringForUsage + `,`, + `}`, + }, "") + return s +} +func (this *NodeMetrics) String() string { + if this == nil { + return "nil" + } + keysForUsage := make([]string, 0, len(this.Usage)) + for k := range this.Usage { + keysForUsage = append(keysForUsage, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsage) + mapStringForUsage := "k8s_io_api_core_v1.ResourceList{" + for _, k := range keysForUsage { + mapStringForUsage += fmt.Sprintf("%v: %v,", k, this.Usage[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForUsage += "}" + s := strings.Join([]string{`&NodeMetrics{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Timestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Window:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Window), "Duration", "v1.Duration", 1), `&`, ``, 1) + `,`, + `Usage:` + mapStringForUsage + `,`, + `}`, + }, "") + return s +} +func (this *NodeMetricsList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]NodeMetrics{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NodeMetrics", "NodeMetrics", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&NodeMetricsList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PodMetrics) String() string { + if this == nil { + return "nil" + } + repeatedStringForContainers := "[]ContainerMetrics{" + for _, f := range this.Containers { + repeatedStringForContainers += strings.Replace(strings.Replace(f.String(), "ContainerMetrics", "ContainerMetrics", 1), `&`, ``, 1) + "," + } + repeatedStringForContainers += "}" + s := strings.Join([]string{`&PodMetrics{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Timestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Window:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Window), 
"Duration", "v1.Duration", 1), `&`, ``, 1) + `,`, + `Containers:` + repeatedStringForContainers + `,`, + `}`, + }, "") + return s +} +func (this *PodMetricsList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PodMetrics{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodMetrics", "PodMetrics", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PodMetricsList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ContainerMetrics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerMetrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Usage == nil { + m.Usage = make(k8s_io_api_core_v1.ResourceList) + } + var mapkey k8s_io_api_core_v1.ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Usage[k8s_io_api_core_v1.ResourceName(mapkey)] = ((k8s_io_apimachinery_pkg_api_resource.Quantity)(*mapvalue)) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeMetrics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeMetrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Window", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Window.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Usage == nil { + m.Usage = make(k8s_io_api_core_v1.ResourceList) + } + var mapkey k8s_io_api_core_v1.ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + 
iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Usage[k8s_io_api_core_v1.ResourceName(mapkey)] = ((k8s_io_apimachinery_pkg_api_resource.Quantity)(*mapvalue)) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeMetricsList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeMetricsList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeMetricsList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, NodeMetrics{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodMetrics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + 
if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodMetrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Window", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Window.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Containers = append(m.Containers, ContainerMetrics{}) + if err := m.Containers[len(m.Containers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodMetricsList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx 
:= 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodMetricsList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodMetricsList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodMetrics{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, 
fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.proto b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.proto new file mode 100644 index 0000000000000..602907938ad02 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.proto @@ -0,0 +1,96 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.metrics.pkg.apis.metrics.v1beta1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/metrics/pkg/apis/metrics/v1beta1"; + +// ContainerMetrics sets resource usage metrics of a container. +message ContainerMetrics { + // Container name corresponding to the one from pod.spec.containers. + optional string name = 1; + + // The memory usage is the memory working set. + map usage = 2; +} + +// NodeMetrics sets resource usage metrics of a node. +message NodeMetrics { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The following fields define time interval from which metrics were + // collected from the interval [Timestamp-Window, Timestamp]. + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time timestamp = 2; + + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Duration window = 3; + + // The memory usage is the memory working set. + map usage = 4; +} + +// NodeMetricsList is a list of NodeMetrics. +message NodeMetricsList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of node metrics. + repeated NodeMetrics items = 2; +} + +// PodMetrics sets resource usage metrics of a pod. +message PodMetrics { + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The following fields define time interval from which metrics were + // collected from the interval [Timestamp-Window, Timestamp]. + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time timestamp = 2; + + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Duration window = 3; + + // Metrics for all containers are collected within the same time window. + // +listType=atomic + repeated ContainerMetrics containers = 4; +} + +// PodMetricsList is a list of PodMetrics. +message PodMetricsList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of pod metrics. + repeated PodMetrics items = 2; +} + diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/register.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/register.go new file mode 100644 index 0000000000000..205d253c779d4 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "metrics.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder points to a list of functions added to Scheme. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + // AddToScheme applies all the stored functions to the scheme. + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &NodeMetrics{}, + &NodeMetricsList{}, + &PodMetrics{}, + &PodMetricsList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/types.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/types.go new file mode 100644 index 0000000000000..cdf01aa3d839e --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/types.go @@ -0,0 +1,102 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +resourceName=nodes +// +genclient:readonly +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeMetrics sets resource usage metrics of a node. +type NodeMetrics struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The following fields define time interval from which metrics were + // collected from the interval [Timestamp-Window, Timestamp]. + Timestamp metav1.Time `json:"timestamp" protobuf:"bytes,2,opt,name=timestamp"` + Window metav1.Duration `json:"window" protobuf:"bytes,3,opt,name=window"` + + // The memory usage is the memory working set. + Usage v1.ResourceList `json:"usage" protobuf:"bytes,4,rep,name=usage,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeMetricsList is a list of NodeMetrics. +type NodeMetricsList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of node metrics. + Items []NodeMetrics `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +resourceName=pods +// +genclient:readonly +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodMetrics sets resource usage metrics of a pod. +type PodMetrics struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The following fields define time interval from which metrics were + // collected from the interval [Timestamp-Window, Timestamp]. + Timestamp metav1.Time `json:"timestamp" protobuf:"bytes,2,opt,name=timestamp"` + Window metav1.Duration `json:"window" protobuf:"bytes,3,opt,name=window"` + + // Metrics for all containers are collected within the same time window. + // +listType=atomic + Containers []ContainerMetrics `json:"containers" protobuf:"bytes,4,rep,name=containers"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodMetricsList is a list of PodMetrics. +type PodMetricsList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of pod metrics. 
+ Items []PodMetrics `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ContainerMetrics sets resource usage metrics of a container. +type ContainerMetrics struct { + // Container name corresponding to the one from pod.spec.containers. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // The memory usage is the memory working set. + Usage v1.ResourceList `json:"usage" protobuf:"bytes,2,rep,name=usage,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` +} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.conversion.go new file mode 100644 index 0000000000000..112c4c707d900 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.conversion.go @@ -0,0 +1,209 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1beta1 + +import ( + unsafe "unsafe" + + v1 "k8s.io/api/core/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + metrics "k8s.io/metrics/pkg/apis/metrics" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*ContainerMetrics)(nil), (*metrics.ContainerMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ContainerMetrics_To_metrics_ContainerMetrics(a.(*ContainerMetrics), b.(*metrics.ContainerMetrics), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*metrics.ContainerMetrics)(nil), (*ContainerMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_metrics_ContainerMetrics_To_v1beta1_ContainerMetrics(a.(*metrics.ContainerMetrics), b.(*ContainerMetrics), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NodeMetrics)(nil), (*metrics.NodeMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_NodeMetrics_To_metrics_NodeMetrics(a.(*NodeMetrics), b.(*metrics.NodeMetrics), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*metrics.NodeMetrics)(nil), (*NodeMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_metrics_NodeMetrics_To_v1beta1_NodeMetrics(a.(*metrics.NodeMetrics), b.(*NodeMetrics), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NodeMetricsList)(nil), (*metrics.NodeMetricsList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_NodeMetricsList_To_metrics_NodeMetricsList(a.(*NodeMetricsList), b.(*metrics.NodeMetricsList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*metrics.NodeMetricsList)(nil), (*NodeMetricsList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_metrics_NodeMetricsList_To_v1beta1_NodeMetricsList(a.(*metrics.NodeMetricsList), b.(*NodeMetricsList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*PodMetrics)(nil), (*metrics.PodMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_PodMetrics_To_metrics_PodMetrics(a.(*PodMetrics), b.(*metrics.PodMetrics), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*metrics.PodMetrics)(nil), (*PodMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_metrics_PodMetrics_To_v1beta1_PodMetrics(a.(*metrics.PodMetrics), b.(*PodMetrics), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*PodMetricsList)(nil), (*metrics.PodMetricsList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_PodMetricsList_To_metrics_PodMetricsList(a.(*PodMetricsList), b.(*metrics.PodMetricsList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*metrics.PodMetricsList)(nil), (*PodMetricsList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_metrics_PodMetricsList_To_v1beta1_PodMetricsList(a.(*metrics.PodMetricsList), b.(*PodMetricsList), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1beta1_ContainerMetrics_To_metrics_ContainerMetrics(in *ContainerMetrics, out *metrics.ContainerMetrics, s conversion.Scope) error { + out.Name = in.Name + out.Usage = *(*v1.ResourceList)(unsafe.Pointer(&in.Usage)) + return nil +} + +// Convert_v1beta1_ContainerMetrics_To_metrics_ContainerMetrics is an autogenerated conversion function. 
+func Convert_v1beta1_ContainerMetrics_To_metrics_ContainerMetrics(in *ContainerMetrics, out *metrics.ContainerMetrics, s conversion.Scope) error { + return autoConvert_v1beta1_ContainerMetrics_To_metrics_ContainerMetrics(in, out, s) +} + +func autoConvert_metrics_ContainerMetrics_To_v1beta1_ContainerMetrics(in *metrics.ContainerMetrics, out *ContainerMetrics, s conversion.Scope) error { + out.Name = in.Name + out.Usage = *(*v1.ResourceList)(unsafe.Pointer(&in.Usage)) + return nil +} + +// Convert_metrics_ContainerMetrics_To_v1beta1_ContainerMetrics is an autogenerated conversion function. +func Convert_metrics_ContainerMetrics_To_v1beta1_ContainerMetrics(in *metrics.ContainerMetrics, out *ContainerMetrics, s conversion.Scope) error { + return autoConvert_metrics_ContainerMetrics_To_v1beta1_ContainerMetrics(in, out, s) +} + +func autoConvert_v1beta1_NodeMetrics_To_metrics_NodeMetrics(in *NodeMetrics, out *metrics.NodeMetrics, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Timestamp = in.Timestamp + out.Window = in.Window + out.Usage = *(*v1.ResourceList)(unsafe.Pointer(&in.Usage)) + return nil +} + +// Convert_v1beta1_NodeMetrics_To_metrics_NodeMetrics is an autogenerated conversion function. +func Convert_v1beta1_NodeMetrics_To_metrics_NodeMetrics(in *NodeMetrics, out *metrics.NodeMetrics, s conversion.Scope) error { + return autoConvert_v1beta1_NodeMetrics_To_metrics_NodeMetrics(in, out, s) +} + +func autoConvert_metrics_NodeMetrics_To_v1beta1_NodeMetrics(in *metrics.NodeMetrics, out *NodeMetrics, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Timestamp = in.Timestamp + out.Window = in.Window + out.Usage = *(*v1.ResourceList)(unsafe.Pointer(&in.Usage)) + return nil +} + +// Convert_metrics_NodeMetrics_To_v1beta1_NodeMetrics is an autogenerated conversion function. +func Convert_metrics_NodeMetrics_To_v1beta1_NodeMetrics(in *metrics.NodeMetrics, out *NodeMetrics, s conversion.Scope) error { + return autoConvert_metrics_NodeMetrics_To_v1beta1_NodeMetrics(in, out, s) +} + +func autoConvert_v1beta1_NodeMetricsList_To_metrics_NodeMetricsList(in *NodeMetricsList, out *metrics.NodeMetricsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]metrics.NodeMetrics)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1beta1_NodeMetricsList_To_metrics_NodeMetricsList is an autogenerated conversion function. +func Convert_v1beta1_NodeMetricsList_To_metrics_NodeMetricsList(in *NodeMetricsList, out *metrics.NodeMetricsList, s conversion.Scope) error { + return autoConvert_v1beta1_NodeMetricsList_To_metrics_NodeMetricsList(in, out, s) +} + +func autoConvert_metrics_NodeMetricsList_To_v1beta1_NodeMetricsList(in *metrics.NodeMetricsList, out *NodeMetricsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]NodeMetrics)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_metrics_NodeMetricsList_To_v1beta1_NodeMetricsList is an autogenerated conversion function. 
+func Convert_metrics_NodeMetricsList_To_v1beta1_NodeMetricsList(in *metrics.NodeMetricsList, out *NodeMetricsList, s conversion.Scope) error { + return autoConvert_metrics_NodeMetricsList_To_v1beta1_NodeMetricsList(in, out, s) +} + +func autoConvert_v1beta1_PodMetrics_To_metrics_PodMetrics(in *PodMetrics, out *metrics.PodMetrics, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Timestamp = in.Timestamp + out.Window = in.Window + out.Containers = *(*[]metrics.ContainerMetrics)(unsafe.Pointer(&in.Containers)) + return nil +} + +// Convert_v1beta1_PodMetrics_To_metrics_PodMetrics is an autogenerated conversion function. +func Convert_v1beta1_PodMetrics_To_metrics_PodMetrics(in *PodMetrics, out *metrics.PodMetrics, s conversion.Scope) error { + return autoConvert_v1beta1_PodMetrics_To_metrics_PodMetrics(in, out, s) +} + +func autoConvert_metrics_PodMetrics_To_v1beta1_PodMetrics(in *metrics.PodMetrics, out *PodMetrics, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Timestamp = in.Timestamp + out.Window = in.Window + out.Containers = *(*[]ContainerMetrics)(unsafe.Pointer(&in.Containers)) + return nil +} + +// Convert_metrics_PodMetrics_To_v1beta1_PodMetrics is an autogenerated conversion function. +func Convert_metrics_PodMetrics_To_v1beta1_PodMetrics(in *metrics.PodMetrics, out *PodMetrics, s conversion.Scope) error { + return autoConvert_metrics_PodMetrics_To_v1beta1_PodMetrics(in, out, s) +} + +func autoConvert_v1beta1_PodMetricsList_To_metrics_PodMetricsList(in *PodMetricsList, out *metrics.PodMetricsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]metrics.PodMetrics)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1beta1_PodMetricsList_To_metrics_PodMetricsList is an autogenerated conversion function. +func Convert_v1beta1_PodMetricsList_To_metrics_PodMetricsList(in *PodMetricsList, out *metrics.PodMetricsList, s conversion.Scope) error { + return autoConvert_v1beta1_PodMetricsList_To_metrics_PodMetricsList(in, out, s) +} + +func autoConvert_metrics_PodMetricsList_To_v1beta1_PodMetricsList(in *metrics.PodMetricsList, out *PodMetricsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]PodMetrics)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_metrics_PodMetricsList_To_v1beta1_PodMetricsList is an autogenerated conversion function. +func Convert_metrics_PodMetricsList_To_v1beta1_PodMetricsList(in *metrics.PodMetricsList, out *PodMetricsList, s conversion.Scope) error { + return autoConvert_metrics_PodMetricsList_To_v1beta1_PodMetricsList(in, out, s) +} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..f043d4642ffa7 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,186 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerMetrics) DeepCopyInto(out *ContainerMetrics) { + *out = *in + if in.Usage != nil { + in, out := &in.Usage, &out.Usage + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerMetrics. +func (in *ContainerMetrics) DeepCopy() *ContainerMetrics { + if in == nil { + return nil + } + out := new(ContainerMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeMetrics) DeepCopyInto(out *NodeMetrics) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Timestamp.DeepCopyInto(&out.Timestamp) + out.Window = in.Window + if in.Usage != nil { + in, out := &in.Usage, &out.Usage + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeMetrics. +func (in *NodeMetrics) DeepCopy() *NodeMetrics { + if in == nil { + return nil + } + out := new(NodeMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeMetrics) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeMetricsList) DeepCopyInto(out *NodeMetricsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NodeMetrics, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeMetricsList. +func (in *NodeMetricsList) DeepCopy() *NodeMetricsList { + if in == nil { + return nil + } + out := new(NodeMetricsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeMetricsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodMetrics) DeepCopyInto(out *PodMetrics) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Timestamp.DeepCopyInto(&out.Timestamp) + out.Window = in.Window + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]ContainerMetrics, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMetrics. 
+func (in *PodMetrics) DeepCopy() *PodMetrics { + if in == nil { + return nil + } + out := new(PodMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodMetrics) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodMetricsList) DeepCopyInto(out *PodMetricsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodMetrics, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMetricsList. +func (in *PodMetricsList) DeepCopy() *PodMetricsList { + if in == nil { + return nil + } + out := new(PodMetricsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodMetricsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/zz_generated.deepcopy.go b/vendor/k8s.io/metrics/pkg/apis/metrics/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..c063c9b28a1d5 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/zz_generated.deepcopy.go @@ -0,0 +1,186 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package metrics + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerMetrics) DeepCopyInto(out *ContainerMetrics) { + *out = *in + if in.Usage != nil { + in, out := &in.Usage, &out.Usage + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerMetrics. +func (in *ContainerMetrics) DeepCopy() *ContainerMetrics { + if in == nil { + return nil + } + out := new(ContainerMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeMetrics) DeepCopyInto(out *NodeMetrics) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Timestamp.DeepCopyInto(&out.Timestamp) + out.Window = in.Window + if in.Usage != nil { + in, out := &in.Usage, &out.Usage + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeMetrics. +func (in *NodeMetrics) DeepCopy() *NodeMetrics { + if in == nil { + return nil + } + out := new(NodeMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeMetrics) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeMetricsList) DeepCopyInto(out *NodeMetricsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NodeMetrics, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeMetricsList. +func (in *NodeMetricsList) DeepCopy() *NodeMetricsList { + if in == nil { + return nil + } + out := new(NodeMetricsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeMetricsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodMetrics) DeepCopyInto(out *PodMetrics) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Timestamp.DeepCopyInto(&out.Timestamp) + out.Window = in.Window + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]ContainerMetrics, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMetrics. +func (in *PodMetrics) DeepCopy() *PodMetrics { + if in == nil { + return nil + } + out := new(PodMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodMetrics) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodMetricsList) DeepCopyInto(out *PodMetricsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodMetrics, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMetricsList. 
+func (in *PodMetricsList) DeepCopy() *PodMetricsList { + if in == nil { + return nil + } + out := new(PodMetricsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodMetricsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 92392f4653acc..6ef6624037dcb 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -10,7 +10,7 @@ github.com/AdaLogics/go-fuzz-headers # github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20250520111509-a70c2aa677fa ## explicit; go 1.18 github.com/AdamKorcz/go-118-fuzz-build/testing -# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.0 +# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 ## explicit; go 1.23.0 github.com/Azure/azure-sdk-for-go/sdk/azcore github.com/Azure/azure-sdk-for-go/sdk/azcore/arm @@ -33,7 +33,7 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming github.com/Azure/azure-sdk-for-go/sdk/azcore/to github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing -# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0 +# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0 ## explicit; go 1.23.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal @@ -56,7 +56,7 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v7 ## explicit; go 1.16 github.com/Azure/go-ansiterm github.com/Azure/go-ansiterm/winterm -# github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 +# github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 ## explicit; go 1.18 github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential @@ -130,7 +130,7 @@ github.com/antlr4-go/antlr/v4 # github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 ## explicit; go 1.13 github.com/asaskevich/govalidator -# github.com/aws/aws-sdk-go-v2 v1.38.3 +# github.com/aws/aws-sdk-go-v2 v1.39.0 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/aws github.com/aws/aws-sdk-go-v2/aws/defaults @@ -157,10 +157,10 @@ github.com/aws/aws-sdk-go-v2/internal/shareddefaults github.com/aws/aws-sdk-go-v2/internal/strings github.com/aws/aws-sdk-go-v2/internal/sync/singleflight github.com/aws/aws-sdk-go-v2/internal/timeconv -# github.com/aws/aws-sdk-go-v2/config v1.31.6 +# github.com/aws/aws-sdk-go-v2/config v1.31.8 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/config -# github.com/aws/aws-sdk-go-v2/credentials v1.18.10 +# github.com/aws/aws-sdk-go-v2/credentials v1.18.12 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/credentials github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds @@ -169,20 +169,20 @@ github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client github.com/aws/aws-sdk-go-v2/credentials/processcreds github.com/aws/aws-sdk-go-v2/credentials/ssocreds github.com/aws/aws-sdk-go-v2/credentials/stscreds -# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.6 +# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.7 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/feature/ec2/imds github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config -# github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.6 +# github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.7 ## explicit; go 1.22 
github.com/aws/aws-sdk-go-v2/internal/configsources -# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.6 +# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.7 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 # github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/internal/ini -# github.com/aws/aws-sdk-go-v2/service/ec2 v1.251.0 +# github.com/aws/aws-sdk-go-v2/service/ec2 v1.253.0 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/ec2 github.com/aws/aws-sdk-go-v2/service/ec2/internal/endpoints @@ -190,20 +190,20 @@ github.com/aws/aws-sdk-go-v2/service/ec2/types # github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding -# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.6 +# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.7 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url -# github.com/aws/aws-sdk-go-v2/service/sso v1.29.1 +# github.com/aws/aws-sdk-go-v2/service/sso v1.29.3 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/sso github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sso/types -# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.2 +# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.4 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/ssooidc github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints github.com/aws/aws-sdk-go-v2/service/ssooidc/types -# github.com/aws/aws-sdk-go-v2/service/sts v1.38.2 +# github.com/aws/aws-sdk-go-v2/service/sts v1.38.4 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/sts github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints @@ -248,7 +248,7 @@ github.com/chai2010/gettext-go github.com/chai2010/gettext-go/mo github.com/chai2010/gettext-go/plural github.com/chai2010/gettext-go/po -# github.com/cilium/charts v0.0.0-20250815135331-8443b5068217 +# github.com/cilium/charts v0.0.0-20250904103431-47fb6519b6bb ## explicit; go 1.17 github.com/cilium/charts # github.com/cilium/coverbee v0.3.3-0.20240723084546-664438750fce @@ -426,7 +426,7 @@ github.com/dgryski/go-farm # github.com/distribution/reference v0.6.0 ## explicit; go 1.20 github.com/distribution/reference -# github.com/docker/docker v28.3.3+incompatible +# github.com/docker/docker v28.4.0+incompatible ## explicit github.com/docker/docker/api github.com/docker/docker/api/types @@ -450,8 +450,6 @@ github.com/docker/docker/api/types/time github.com/docker/docker/api/types/versions github.com/docker/docker/api/types/volume github.com/docker/docker/client -github.com/docker/docker/internal/lazyregexp -github.com/docker/docker/internal/multierror github.com/docker/docker/pkg/homedir github.com/docker/docker/pkg/ioutils github.com/docker/docker/pkg/plugingetter @@ -951,7 +949,7 @@ github.com/golang/protobuf/proto # github.com/google/btree v1.1.3 ## explicit; go 1.18 github.com/google/btree -# github.com/google/cel-go v0.26.0 +# github.com/google/cel-go v0.26.1 ## explicit; go 1.22.0 github.com/google/cel-go/cel github.com/google/cel-go/checker @@ -1322,7 +1320,7 @@ github.com/pelletier/go-toml/v2/unstable # github.com/peterbourgon/diskv v2.0.1+incompatible ## explicit github.com/peterbourgon/diskv -# github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 +# github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe ## explicit; go 1.17 github.com/petermattis/goid # 
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c @@ -1346,7 +1344,7 @@ github.com/pmezard/go-difflib/difflib # github.com/prometheus-community/pro-bing v0.7.0 ## explicit; go 1.23.0 github.com/prometheus-community/pro-bing -# github.com/prometheus/client_golang v1.23.0 +# github.com/prometheus/client_golang v1.23.2 ## explicit; go 1.23.0 github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header @@ -1361,7 +1359,7 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint/validations # github.com/prometheus/client_model v0.6.2 ## explicit; go 1.22.0 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.65.0 +# github.com/prometheus/common v0.66.1 ## explicit; go 1.23.0 github.com/prometheus/common/expfmt github.com/prometheus/common/model @@ -1387,7 +1385,7 @@ github.com/sagikazarmark/locafero ## explicit; go 1.21 github.com/santhosh-tekuri/jsonschema/v6 github.com/santhosh-tekuri/jsonschema/v6/kind -# github.com/sasha-s/go-deadlock v0.3.5 +# github.com/sasha-s/go-deadlock v0.3.6 ## explicit github.com/sasha-s/go-deadlock # github.com/sergi/go-diff v1.3.1 @@ -1414,11 +1412,11 @@ github.com/spf13/afero/mem ## explicit; go 1.21.0 github.com/spf13/cast github.com/spf13/cast/internal -# github.com/spf13/cobra v1.9.1 +# github.com/spf13/cobra v1.10.1 ## explicit; go 1.15 github.com/spf13/cobra github.com/spf13/cobra/doc -# github.com/spf13/pflag v1.0.7 +# github.com/spf13/pflag v1.0.10 ## explicit; go 1.12 github.com/spf13/pflag # github.com/spf13/viper v1.20.1 @@ -1677,8 +1675,8 @@ golang.org/x/exp/slices golang.org/x/exp/slog golang.org/x/exp/slog/internal golang.org/x/exp/slog/internal/buffer -# golang.org/x/mod v0.27.0 -## explicit; go 1.23.0 +# golang.org/x/mod v0.28.0 +## explicit; go 1.24.0 golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile golang.org/x/mod/module @@ -1708,26 +1706,26 @@ golang.org/x/net/netutil golang.org/x/net/proxy golang.org/x/net/trace golang.org/x/net/websocket -# golang.org/x/oauth2 v0.30.0 -## explicit; go 1.23.0 +# golang.org/x/oauth2 v0.31.0 +## explicit; go 1.24.0 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sync v0.16.0 -## explicit; go 1.23.0 +# golang.org/x/sync v0.17.0 +## explicit; go 1.24.0 golang.org/x/sync/errgroup golang.org/x/sync/semaphore golang.org/x/sync/singleflight -# golang.org/x/sys v0.35.0 -## explicit; go 1.23.0 +# golang.org/x/sys v0.36.0 +## explicit; go 1.24.0 golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/term v0.34.0 -## explicit; go 1.23.0 +# golang.org/x/term v0.35.0 +## explicit; go 1.24.0 golang.org/x/term -# golang.org/x/text v0.28.0 -## explicit; go 1.23.0 +# golang.org/x/text v0.29.0 +## explicit; go 1.24.0 golang.org/x/text/cases golang.org/x/text/encoding golang.org/x/text/encoding/charmap @@ -1757,8 +1755,8 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.12.0 -## explicit; go 1.23.0 +# golang.org/x/time v0.13.0 +## explicit; go 1.24.0 golang.org/x/time/rate # golang.org/x/tools v0.36.0 ## explicit; go 1.23.0 @@ -1832,8 +1830,8 @@ gomodules.xyz/jsonpatch/v2 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/expr/v1alpha1 -# google.golang.org/genproto/googleapis/rpc v0.0.0-20250818200422-3122310a409c -## explicit; 
go 1.23.0 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20250826171959-ef028d996bc1 +## explicit; go 1.24.0 google.golang.org/genproto/googleapis/rpc/status # google.golang.org/grpc v1.75.0 ## explicit; go 1.23.0 @@ -1972,7 +1970,7 @@ gopkg.in/yaml.v2 gopkg.in/yaml.v3 # gotest.tools/v3 v3.5.0 ## explicit; go 1.17 -# helm.sh/helm/v3 v3.18.5 +# helm.sh/helm/v3 v3.18.6 ## explicit; go 1.24.0 helm.sh/helm/v3/internal/fileutil helm.sh/helm/v3/internal/resolver @@ -2014,7 +2012,7 @@ helm.sh/helm/v3/pkg/strvals helm.sh/helm/v3/pkg/time helm.sh/helm/v3/pkg/time/ctime helm.sh/helm/v3/pkg/uploader -# k8s.io/api v0.34.0 +# k8s.io/api v0.34.1 ## explicit; go 1.24.0 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -2076,7 +2074,7 @@ k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 k8s.io/api/storagemigration/v1alpha1 -# k8s.io/apiextensions-apiserver v0.34.0 +# k8s.io/apiextensions-apiserver v0.34.1 ## explicit; go 1.24.0 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 @@ -2094,7 +2092,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextension k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake k8s.io/apiextensions-apiserver/pkg/features -# k8s.io/apimachinery v0.34.0 +# k8s.io/apimachinery v0.34.1 ## explicit; go 1.24.0 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -2167,7 +2165,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.34.0 +# k8s.io/apiserver v0.34.1 ## explicit; go 1.24.0 k8s.io/apiserver/pkg/apis/cel k8s.io/apiserver/pkg/authentication/serviceaccount @@ -2182,13 +2180,13 @@ k8s.io/apiserver/pkg/endpoints/deprecation k8s.io/apiserver/pkg/features k8s.io/apiserver/pkg/util/compatibility k8s.io/apiserver/pkg/util/feature -# k8s.io/cli-runtime v0.34.0 +# k8s.io/cli-runtime v0.34.1 ## explicit; go 1.24.0 k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/genericiooptions k8s.io/cli-runtime/pkg/printers k8s.io/cli-runtime/pkg/resource -# k8s.io/client-go v0.34.0 +# k8s.io/client-go v0.34.1 ## explicit; go 1.24.0 k8s.io/client-go/applyconfigurations k8s.io/client-go/applyconfigurations/admissionregistration/v1 @@ -2547,7 +2545,7 @@ k8s.io/client-go/util/jsonpath k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/code-generator v0.34.0 +# k8s.io/code-generator v0.34.1 ## explicit; go 1.24.0 k8s.io/code-generator k8s.io/code-generator/cmd/applyconfiguration-gen @@ -2587,7 +2585,7 @@ k8s.io/code-generator/cmd/validation-gen/validators k8s.io/code-generator/pkg/namer k8s.io/code-generator/pkg/util k8s.io/code-generator/third_party/forked/golang/reflect -# k8s.io/component-base v0.34.0 +# k8s.io/component-base v0.34.1 ## explicit; go 1.24.0 k8s.io/component-base/cli/flag k8s.io/component-base/compatibility @@ -2644,7 +2642,7 @@ k8s.io/kube-openapi/pkg/validation/spec k8s.io/kube-openapi/pkg/validation/strfmt k8s.io/kube-openapi/pkg/validation/strfmt/bson k8s.io/kube-openapi/pkg/validation/validate -# k8s.io/kubectl v0.34.0 +# k8s.io/kubectl v0.34.1 ## explicit; go 1.24.0 k8s.io/kubectl/pkg/cmd/util k8s.io/kubectl/pkg/scheme @@ -2656,6 +2654,10 @@ k8s.io/kubectl/pkg/util/podutils 
k8s.io/kubectl/pkg/util/templates k8s.io/kubectl/pkg/util/term k8s.io/kubectl/pkg/validation +# k8s.io/metrics v0.34.1 +## explicit; go 1.24.0 +k8s.io/metrics/pkg/apis/metrics +k8s.io/metrics/pkg/apis/metrics/v1beta1 # k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d ## explicit; go 1.18 k8s.io/utils/buffer @@ -2870,7 +2872,7 @@ sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation/field sigs.k8s.io/kustomize/kyaml/yaml/merge2 sigs.k8s.io/kustomize/kyaml/yaml/schema sigs.k8s.io/kustomize/kyaml/yaml/walk -# sigs.k8s.io/mcs-api v0.1.1-0.20250610011024-38bab5ba476b +# sigs.k8s.io/mcs-api v0.3.1-0.20250908090929-79efdd37ed2b ## explicit; go 1.23.0 sigs.k8s.io/mcs-api/pkg/apis/v1alpha1 sigs.k8s.io/mcs-api/pkg/client/clientset/versioned diff --git a/vendor/sigs.k8s.io/mcs-api/pkg/apis/v1alpha1/serviceexport.go b/vendor/sigs.k8s.io/mcs-api/pkg/apis/v1alpha1/serviceexport.go index 869b8142d4152..fb79ead9afc6d 100644 --- a/vendor/sigs.k8s.io/mcs-api/pkg/apis/v1alpha1/serviceexport.go +++ b/vendor/sigs.k8s.io/mcs-api/pkg/apis/v1alpha1/serviceexport.go @@ -20,6 +20,18 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +const ( + // ServiceExportPluralName is the plural name of ServiceExport + ServiceExportPluralName = "serviceexports" + // ServiceExportKindName is the kind name of ServiceExport + ServiceExportKindName = "ServiceExport" + // ServiceExportFullName is the full name of ServiceExport + ServiceExportFullName = ServiceExportPluralName + "." + GroupName +) + +// ServiceExportVersionedName is the versioned name of ServiceExport +var ServiceExportVersionedName = ServiceExportKindName + "/" + GroupVersion.Version + // +genclient // +kubebuilder:object:root=true // +kubebuilder:resource:shortName={svcex,svcexport} @@ -66,6 +78,8 @@ const ( // service export has been recognized as valid by an mcs-controller. // This will be false if the service is found to be unexportable // (ExternalName, not found). + // + // Deprecated: use ServiceExportConditionValid instead ServiceExportValid = "Valid" // ServiceExportConflict means that there is a conflict between two // exports for the same Service. When "True", the condition message @@ -73,6 +87,8 @@ const ( // field(s) under contention, which cluster won, and why. // Users should not expect detailed per-cluster information in the // conflict message. + // + // Deprecated: use ServiceExportConditionConflict instead ServiceExportConflict = "Conflict" ) @@ -88,3 +104,168 @@ type ServiceExportList struct { // +listType=set Items []ServiceExport `json:"items"` } + +// ServiceExportConditionType is a type of condition associated with a +// ServiceExport. This type should be used with the ServiceExportStatus.Conditions +// field. +type ServiceExportConditionType string + +// ServiceExportConditionReason defines the set of reasons that explain why a +// particular ServiceExport condition type has been raised. +type ServiceExportConditionReason string + +// NewServiceExportCondition creates a new ServiceExport condition +func NewServiceExportCondition(t ServiceExportConditionType, status metav1.ConditionStatus, reason ServiceExportConditionReason, msg string) metav1.Condition { + return metav1.Condition{ + Type: string(t), + Status: status, + Reason: string(reason), + Message: msg, + LastTransitionTime: metav1.Now(), + } +} + +const ( + // ServiceExportConditionValid is true when the Service Export is valid. + // This does not indicate whether or not the configuration has been exported + // to a control plane / data plane. 
+ // + // + // Possible reasons for this condition to be true are: + // + // * "Valid" + // + // Possible reasons for this condition to be False are: + // + // * "NoService" + // * "InvalidServiceType" + // + // Controllers may raise this condition with other reasons, + // but should prefer to use the reasons listed above to improve + // interoperability. + ServiceExportConditionValid ServiceExportConditionType = "Valid" + + // ServiceExportReasonValid is used with the "Valid" condition when the + // condition is True. + ServiceExportReasonValid ServiceExportConditionReason = "Valid" + + // ServiceExportReasonNoService is used with the "Valid" condition when + // the associated Service does not exist. + ServiceExportReasonNoService ServiceExportConditionReason = "NoService" + + // ServiceExportReasonInvalidServiceType is used with the "Valid" + // condition when the associated Service has an invalid type + // (per the KEP at least the ExternalName type). + ServiceExportReasonInvalidServiceType ServiceExportConditionReason = "InvalidServiceType" +) + +const ( + // ServiceExportConditionReady is true when the service is exported + // to some control plane or data plane or ready to be pulled. + // + // + // Possible reasons for this condition to be true are: + // + // * "Exported" + // * "Ready" + // + // Possible reasons for this condition to be False are: + // + // * "Pending" + // * "Failed" + // + // Possible reasons for this condition to be Unknown are: + // + // * "Pending" + // + // Controllers may raise this condition with other reasons, + // but should prefer to use the reasons listed above to improve + // interoperability. + ServiceExportConditionReady ServiceExportConditionType = "Ready" + + // ServiceExportReasonExported is used with the "Ready" condition + // when the condition is True and the service has been exported. + // This would be used when an implementation exports a service + // to a control plane or data plane. + ServiceExportReasonExported ServiceExportConditionReason = "Exported" + + // ServiceExportReasonReady is used with the "Ready" condition + // when the condition is True and the service has been exported. + // This would typically be used in an implementation that uses a + // pull model. + ServiceExportReasonReady ServiceExportConditionReason = "Ready" + + // ServiceExportReasonPending is used with the "Ready" condition + // when the service is in the process of being exported. + ServiceExportReasonPending ServiceExportConditionReason = "Pending" + + // ServiceExportReasonFailed is used with the "Ready" condition + // when the service failed to be exported with the message providing + // the specific reason. + ServiceExportReasonFailed ServiceExportConditionReason = "Failed" +) + +const ( + // ServiceExportConditionConflict indicates that some property of an + // exported service has conflicting values across the constituent + // ServiceExports. This condition must be at least raised on the + // conflicting ServiceExport and is recommended to be raised on + // all the constituent ServiceExports if feasible. + // + // + // Possible reasons for this condition to be true are: + // + // * "PortConflict" + // * "TypeConflict" + // * "SessionAffinityConflict" + // * "SessionAffinityConfigConflict" + // * "AnnotationsConflict" + // * "LabelsConflict" + // + // When multiple conflicts occur, the above reasons may be combined + // using commas.
+ // + // Possible reasons for this condition to be False are: + // + // * "NoConflicts" + // + // Controllers may raise this condition with other reasons, + // but should prefer to use the reasons listed above to improve + // interoperability. + ServiceExportConditionConflict ServiceExportConditionType = "Conflict" + + // ServiceExportReasonPortConflict is used with the "Conflict" condition + // when the exported service has a conflict related to port configuration. + // This includes when ports on resulting imported services would have + // duplicated names (including unnamed/empty name) or duplicated + // port/protocol pairs. + ServiceExportReasonPortConflict ServiceExportConditionReason = "PortConflict" + + // ServiceExportReasonTypeConflict is used with the "Conflict" condition + // when the exported service has a conflict related to the service type + // (e.g. headless vs. non-headless). + ServiceExportReasonTypeConflict ServiceExportConditionReason = "TypeConflict" + + // ServiceExportReasonSessionAffinityConflict is used with the "Conflict" + // condition when the exported service has a conflict related to session affinity. + ServiceExportReasonSessionAffinityConflict ServiceExportConditionReason = "SessionAffinityConflict" + + // ServiceExportReasonSessionAffinityConfigConflict is used with the + // "Conflict" condition when the exported service has a conflict related + // to session affinity config. + ServiceExportReasonSessionAffinityConfigConflict ServiceExportConditionReason = "SessionAffinityConfigConflict" + + // ServiceExportReasonLabelsConflict is used with the "Conflict" + // condition when the ServiceExport has a conflict related to exported + // labels. + ServiceExportReasonLabelsConflict ServiceExportConditionReason = "LabelsConflict" + + // ServiceExportReasonAnnotationsConflict is used with the "Conflict" + // condition when the ServiceExport has a conflict related to exported + // annotations. + ServiceExportReasonAnnotationsConflict ServiceExportConditionReason = "AnnotationsConflict" + + // ServiceExportReasonNoConflicts is used with the "Conflict" condition + // when the condition is False. + ServiceExportReasonNoConflicts ServiceExportConditionReason = "NoConflicts" +) diff --git a/vendor/sigs.k8s.io/mcs-api/pkg/apis/v1alpha1/serviceimport.go b/vendor/sigs.k8s.io/mcs-api/pkg/apis/v1alpha1/serviceimport.go index d6b908ddd4e00..8810a632d1f5b 100644 --- a/vendor/sigs.k8s.io/mcs-api/pkg/apis/v1alpha1/serviceimport.go +++ b/vendor/sigs.k8s.io/mcs-api/pkg/apis/v1alpha1/serviceimport.go @@ -21,6 +21,18 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +const ( + // ServiceImportPluralName is the plural name of ServiceImport + ServiceImportPluralName = "serviceimports" + // ServiceImportKindName is the kind name of ServiceImport + ServiceImportKindName = "ServiceImport" + // ServiceImportFullName is the full name of ServiceImport + ServiceImportFullName = ServiceImportPluralName + "."
+ GroupName +) + +// ServiceImportVersionedName is the versioned name of ServiceImport +var ServiceImportVersionedName = ServiceImportKindName + "/" + GroupVersion.Version + // +genclient // +kubebuilder:object:root=true // +kubebuilder:resource:shortName={svcim,svcimport} @@ -120,6 +132,12 @@ type ServiceImportStatus struct { // +listType=map // +listMapKey=cluster Clusters []ClusterStatus `json:"clusters,omitempty"` + // +optional + // +patchStrategy=merge + // +patchMergeKey=type + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` } // ClusterStatus contains service configuration mapped to a specific source cluster @@ -141,3 +159,59 @@ type ServiceImportList struct { // +listType=set Items []ServiceImport `json:"items"` } + +// ServiceImportConditionType is a type of condition associated with a +// ServiceImport. This type should be used with the ServiceImportStatus.Conditions +// field. +type ServiceImportConditionType string + +// ServiceImportConditionReason defines the set of reasons that explain why a +// particular ServiceImport condition type has been raised. +type ServiceImportConditionReason string + +// NewServiceImportCondition creates a new ServiceImport condition +func NewServiceImportCondition(t ServiceImportConditionType, status metav1.ConditionStatus, reason ServiceImportConditionReason, msg string) metav1.Condition { + return metav1.Condition{ + Type: string(t), + Status: status, + Reason: string(reason), + Message: msg, + LastTransitionTime: metav1.Now(), + } +} + +const ( + // ServiceImportConditionReady is true when the Service Import is ready. + // + // + // Possible reasons for this condition to be true are: + // + // * "Ready" + // + // Possible reasons for this condition to be False are: + // + // * "Pending" + // * "IPFamilyNotSupported" + // + // Possible reasons for this condition to be Unknown are: + // + // * "Pending" + // + // Controllers may raise this condition with other reasons, + // but should prefer to use the reasons listed above to improve + // interoperability. + ServiceImportConditionReady ServiceImportConditionType = "Ready" + + // ServiceImportReasonReady is used with the "Ready" condition when the + // condition is True. + ServiceImportReasonReady ServiceImportConditionReason = "Ready" + + // ServiceImportReasonPending is used with the "Ready" condition when + // the ServiceImport is in the process of being created or updated. + ServiceImportReasonPending ServiceImportConditionReason = "Pending" + + // ServiceImportReasonIPFamilyNotSupported is used with the "Ready" + // condition when the service cannot be imported due to an IP family + // mismatch.
+ ServiceImportReasonIPFamilyNotSupported ServiceImportConditionReason = "IPFamilyNotSupported" +) diff --git a/vendor/sigs.k8s.io/mcs-api/pkg/apis/v1alpha1/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/mcs-api/pkg/apis/v1alpha1/zz_generated.deepcopy.go index 4e6fd4d49f8a6..72181237a6674 100644 --- a/vendor/sigs.k8s.io/mcs-api/pkg/apis/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/sigs.k8s.io/mcs-api/pkg/apis/v1alpha1/zz_generated.deepcopy.go @@ -250,6 +250,13 @@ func (in *ServiceImportStatus) DeepCopyInto(out *ServiceImportStatus) { *out = make([]ClusterStatus, len(*in)) copy(*out, *in) } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceImportStatus.